From: Aaron Rosen
Date: Sat, 16 Feb 2013 06:08:10 +0000 (-0800)
Subject: Rename admin_status_up to admin_state_up
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=0cc537cd3824e389cba4237a811eef9de799ee5d;p=openstack-build%2Fneutron-build.git

Rename admin_status_up to admin_state_up

This patch renames all instances of admin_status_up to admin_state_up,
since that is the correct name. There was also one instance of this for
NVP, which was renamed to admin_status_enabled, since that is the name
used in NVP.

Fixes bug 1126966

Change-Id: Ie74bc411082cb2de4d83b947df1f248facbd0d16
---

diff --git a/quantum/plugins/nec/nec_plugin.py b/quantum/plugins/nec/nec_plugin.py
index 0149157c0..38dcfbad9 100644
--- a/quantum/plugins/nec/nec_plugin.py
+++ b/quantum/plugins/nec/nec_plugin.py
@@ -47,7 +47,7 @@ class OperationalStatus:
 
     ACTIVE: The resource is available.
     DOWN: The resource is not operational. This might indicate
-          admin_status_up=False, or lack of OpenFlow info for the port.
+          admin_state_up=False, or lack of OpenFlow info for the port.
     BUILD: The plugin is creating the resource.
     ERROR: Some error occured.
     """
diff --git a/quantum/tests/unit/db/loadbalancer/test_db_loadbalancer.py b/quantum/tests/unit/db/loadbalancer/test_db_loadbalancer.py
index f69502a17..0349f04b2 100644
--- a/quantum/tests/unit/db/loadbalancer/test_db_loadbalancer.py
+++ b/quantum/tests/unit/db/loadbalancer/test_db_loadbalancer.py
@@ -160,14 +160,14 @@ class LoadBalancerPluginDbTestCase(testlib_api.WebTestCase):
     def new_update_request(self, resource, data, id, fmt=None):
         return self._req('PUT', resource, data, fmt, id=id)
 
-    def _create_vip(self, fmt, name, pool_id, protocol, port, admin_status_up,
+    def _create_vip(self, fmt, name, pool_id, protocol, port, admin_state_up,
                     expected_res_status=None, **kwargs):
         data = {'vip': {'name': name,
                         'subnet_id': self._subnet_id,
                         'pool_id': pool_id,
                         'protocol': protocol,
                         'port': port,
-                        'admin_state_up': admin_status_up,
+                        'admin_state_up': admin_state_up,
                         'tenant_id': self._tenant_id}}
         for arg in ('description', 'address', 'session_persistence',
                     'connection_limit'):
@@ -181,13 +181,13 @@ class LoadBalancerPluginDbTestCase(testlib_api.WebTestCase):
 
         return vip_res
 
-    def _create_pool(self, fmt, name, lb_method, protocol, admin_status_up,
+    def _create_pool(self, fmt, name, lb_method, protocol, admin_state_up,
                      expected_res_status=None, **kwargs):
         data = {'pool': {'name': name,
                          'subnet_id': self._subnet_id,
                          'lb_method': lb_method,
                          'protocol': protocol,
-                         'admin_state_up': admin_status_up,
+                         'admin_state_up': admin_state_up,
                          'tenant_id': self._tenant_id}}
         for arg in ('description'):
             if arg in kwargs and kwargs[arg] is not None:
@@ -200,11 +200,11 @@ class LoadBalancerPluginDbTestCase(testlib_api.WebTestCase):
 
         return pool_res
 
-    def _create_member(self, fmt, address, port, admin_status_up,
+    def _create_member(self, fmt, address, port, admin_state_up,
                        expected_res_status=None, **kwargs):
         data = {'member': {'address': address,
                            'port': port,
-                           'admin_state_up': admin_status_up,
+                           'admin_state_up': admin_state_up,
                            'tenant_id': self._tenant_id}}
         for arg in ('weight', 'pool_id'):
             if arg in kwargs and kwargs[arg] is not None:
@@ -218,13 +218,13 @@ class LoadBalancerPluginDbTestCase(testlib_api.WebTestCase):
         return member_res
 
     def _create_health_monitor(self, fmt, type, delay, timeout, max_retries,
-                               admin_status_up, expected_res_status=None,
+                               admin_state_up, expected_res_status=None,
                                **kwargs):
         data = {'health_monitor': {'type': type,
                                    'delay': delay,
                                    'timeout': timeout,
                                   'max_retries': max_retries,
-                                   'admin_state_up': admin_status_up,
+                                   'admin_state_up': admin_state_up,
                                    'tenant_id': self._tenant_id}}
         for arg in ('http_method', 'path', 'expected_code'):
             if arg in kwargs and kwargs[arg] is not None:
@@ -271,7 +271,7 @@ class LoadBalancerPluginDbTestCase(testlib_api.WebTestCase):
 
     @contextlib.contextmanager
     def vip(self, fmt=None, name='vip1', pool=None,
-            protocol='HTTP', port=80, admin_status_up=True, no_delete=False,
+            protocol='HTTP', port=80, admin_state_up=True, no_delete=False,
             address="172.16.1.123", **kwargs):
         if not fmt:
             fmt = self.fmt
@@ -283,7 +283,7 @@ class LoadBalancerPluginDbTestCase(testlib_api.WebTestCase):
                                    pool_id,
                                    protocol,
                                    port,
-                                   admin_status_up,
+                                   admin_state_up,
                                    address=address,
                                    **kwargs)
             vip = self.deserialize(res)
@@ -299,7 +299,7 @@ class LoadBalancerPluginDbTestCase(testlib_api.WebTestCase):
                                    pool_id,
                                    protocol,
                                    port,
-                                   admin_status_up,
+                                   admin_state_up,
                                    address=address,
                                    **kwargs)
             vip = self.deserialize(res)
@@ -311,7 +311,7 @@ class LoadBalancerPluginDbTestCase(testlib_api.WebTestCase):
 
     @contextlib.contextmanager
     def pool(self, fmt=None, name='pool1', lb_method='ROUND_ROBIN',
-             protocol='HTTP', admin_status_up=True, no_delete=False,
+             protocol='HTTP', admin_state_up=True, no_delete=False,
              **kwargs):
         if not fmt:
             fmt = self.fmt
@@ -319,7 +319,7 @@ class LoadBalancerPluginDbTestCase(testlib_api.WebTestCase):
                                 name,
                                 lb_method,
                                 protocol,
-                                admin_status_up,
+                                admin_state_up,
                                 **kwargs)
         pool = self.deserialize(res)
         if res.status_int >= 400:
@@ -330,14 +330,14 @@ class LoadBalancerPluginDbTestCase(testlib_api.WebTestCase):
 
     @contextlib.contextmanager
     def member(self, fmt=None, address='192.168.1.100',
-               port=80, admin_status_up=True, no_delete=False,
+               port=80, admin_state_up=True, no_delete=False,
                **kwargs):
         if not fmt:
             fmt = self.fmt
         res = self._create_member(fmt,
                                   address,
                                   port,
-                                  admin_status_up,
+                                  admin_state_up,
                                   **kwargs)
         member = self.deserialize(res)
         if res.status_int >= 400:
@@ -349,7 +349,7 @@ class LoadBalancerPluginDbTestCase(testlib_api.WebTestCase):
     @contextlib.contextmanager
     def health_monitor(self, fmt=None, type='TCP',
                        delay=30, timeout=10, max_retries=3,
-                       admin_status_up=True,
+                       admin_state_up=True,
                        no_delete=False, **kwargs):
         if not fmt:
             fmt = self.fmt
@@ -358,7 +358,7 @@ class LoadBalancerPluginDbTestCase(testlib_api.WebTestCase):
                                           delay,
                                           timeout,
                                           max_retries,
-                                          admin_status_up,
+                                          admin_state_up,
                                           **kwargs)
         health_monitor = self.deserialize(res)
         the_health_monitor = health_monitor['health_monitor']
diff --git a/quantum/tests/unit/nicira/etc/fake_get_lswitch_lport_status.json b/quantum/tests/unit/nicira/etc/fake_get_lswitch_lport_status.json
index 4836d3687..fa8e5f4da 100644
--- a/quantum/tests/unit/nicira/etc/fake_get_lswitch_lport_status.json
+++ b/quantum/tests/unit/nicira/etc/fake_get_lswitch_lport_status.json
@@ -16,7 +16,7 @@
                "_href": "/ws.v1/lswitch/%(ls_uuid)s"},
       "link_status_up": false,
       "_schema": "/ws.v1/schema/LogicalSwitchPortStatus",
-      "admin_status_up": true,
+      "admin_status_enabled": true,
       "fabric_status_up": false,
       "type": "LogicalSwitchPortStatus"
    }
diff --git a/quantum/tests/unit/nicira/test_nicira_plugin.py b/quantum/tests/unit/nicira/test_nicira_plugin.py
index ae47fb9a4..35b01ed4a 100644
--- a/quantum/tests/unit/nicira/test_nicira_plugin.py
+++ b/quantum/tests/unit/nicira/test_nicira_plugin.py
@@ -41,10 +41,10 @@ class NiciraPluginV2TestCase(test_plugin.QuantumDbPluginV2TestCase):
 
     _plugin_name = ('%s.QuantumPlugin.NvpPluginV2' % NICIRA_PKG_PATH)
 
-    def _create_network(self, fmt, name, admin_status_up,
+    def _create_network(self, fmt, name, admin_state_up,
                         arg_list=None, providernet_args=None, **kwargs):
         data = {'network': {'name': name,
-                            'admin_state_up': admin_status_up,
+                            'admin_state_up': admin_state_up,
                             'tenant_id': self._tenant_id}}
         attributes = kwargs
         if providernet_args:
diff --git a/quantum/tests/unit/test_db_plugin.py b/quantum/tests/unit/test_db_plugin.py
index 91b4f1bbb..a2c86e2a5 100644
--- a/quantum/tests/unit/test_db_plugin.py
+++ b/quantum/tests/unit/test_db_plugin.py
@@ -206,10 +206,10 @@ class QuantumDbPluginV2TestCase(testlib_api.WebTestCase):
             req.environ['quantum.context'] = kwargs['context']
         return req.get_response(self.api)
 
-    def _create_network(self, fmt, name, admin_status_up,
+    def _create_network(self, fmt, name, admin_state_up,
                         arg_list=None, **kwargs):
         data = {'network': {'name': name,
-                            'admin_state_up': admin_status_up,
+                            'admin_state_up': admin_state_up,
                             'tenant_id': self._tenant_id}}
         for arg in (('admin_state_up', 'tenant_id', 'shared') +
                     (arg_list or ())):
@@ -225,8 +225,8 @@ class QuantumDbPluginV2TestCase(testlib_api.WebTestCase):
         return network_req.get_response(self.api)
 
     def _create_network_bulk(self, fmt, number, name,
-                             admin_status_up, **kwargs):
-        base_data = {'network': {'admin_state_up': admin_status_up,
+                             admin_state_up, **kwargs):
+        base_data = {'network': {'admin_state_up': admin_state_up,
                                  'tenant_id': self._tenant_id}}
         return self._create_bulk(fmt, number, 'network', base_data,
                                  **kwargs)
@@ -316,14 +316,14 @@ class QuantumDbPluginV2TestCase(testlib_api.WebTestCase):
         return port_res
 
     def _create_port_bulk(self, fmt, number, net_id, name,
-                          admin_status_up, **kwargs):
+                          admin_state_up, **kwargs):
         base_data = {'port': {'network_id': net_id,
-                              'admin_state_up': admin_status_up,
+                              'admin_state_up': admin_state_up,
                               'tenant_id': self._tenant_id}}
         return self._create_bulk(fmt, number, 'port', base_data,
                                  **kwargs)
 
-    def _make_network(self, fmt, name, admin_status_up, **kwargs):
-        res = self._create_network(fmt, name, admin_status_up, **kwargs)
+    def _make_network(self, fmt, name, admin_state_up, **kwargs):
+        res = self._create_network(fmt, name, admin_state_up, **kwargs)
         # TODO(salvatore-orlando): do exception handling in this test module
         # in a uniform way (we do it differently for ports, subnets, and nets
         # Things can go wrong - raise HTTP exc with res code only
@@ -441,12 +441,12 @@ class QuantumDbPluginV2TestCase(testlib_api.WebTestCase):
 
     @contextlib.contextmanager
     def network(self, name='net1',
-                admin_status_up=True,
+                admin_state_up=True,
                 fmt=None,
                 do_delete=True,
                 **kwargs):
         network = self._make_network(fmt or self.fmt, name,
-                                     admin_status_up, **kwargs)
+                                     admin_state_up, **kwargs)
         try:
             yield network
         finally:
@@ -851,7 +851,7 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
 
     def test_delete_network_port_exists_owned_by_network(self):
         res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_status_up=True)
+                                   admin_state_up=True)
         network = self.deserialize(self.fmt, res)
         network_id = network['network']['id']
         self._create_port(self.fmt, network_id, device_owner='network:dhcp')
@@ -991,7 +991,7 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
         with mock.patch.object(quantum.db.db_base_plugin_v2.QuantumDbPluginV2,
                                '_generate_mac', new=fake_gen_mac):
             res = self._create_network(fmt=self.fmt, name='net1',
-                                       admin_status_up=True)
+                                       admin_state_up=True)
             network = self.deserialize(self.fmt, res)
             net_id = network['network']['id']
             res = self._create_port(self.fmt, net_id=net_id)
@@ -1047,7 +1047,7 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
         with self.port(subnet=subnet) as port:
             # Create new network
             res = self._create_network(fmt=self.fmt, name='net2',
-                                       admin_status_up=True)
+                                       admin_state_up=True)
             network2 = self.deserialize(self.fmt, res)
             subnet2 = self._make_subnet(self.fmt, network2, "1.1.1.1",
                                         "1.1.1.0/24", ip_version=4)
@@ -1757,7 +1757,7 @@ class TestNetworksV2(QuantumDbPluginV2TestCase):
 
     def test_list_networks_with_parameters(self):
        with contextlib.nested(self.network(name='net1',
-                                           admin_status_up=False),
+                                           admin_state_up=False),
                               self.network(name='net2')) as (net1, net2):
            query_params = 'admin_state_up=False'
            self._test_list_resources('network', [net1],
@@ -1779,7 +1779,7 @@ class TestNetworksV2(QuantumDbPluginV2TestCase):
 
    def test_list_networks_with_parameters_invalid_values(self):
        with contextlib.nested(self.network(name='net1',
-                                           admin_status_up=False),
+                                           admin_state_up=False),
                               self.network(name='net2')) as (net1, net2):
            req = self.new_list_request('networks',
                                        params='admin_state_up=fake')
@@ -2002,7 +2002,7 @@ class TestSubnetsV2(QuantumDbPluginV2TestCase):
        cidr = '10.0.0.0/24'
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
-                                  admin_status_up=True)
+                                  admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=4)
@@ -2015,7 +2015,7 @@ class TestSubnetsV2(QuantumDbPluginV2TestCase):
        cidr = '10.0.0.0/24'
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
-                                  admin_status_up=True)
+                                  admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        network_id = network['network']['id']
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
@@ -2043,7 +2043,7 @@ class TestSubnetsV2(QuantumDbPluginV2TestCase):
        cidr = '10.0.0.0/24'
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
-                                  admin_status_up=True)
+                                  admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=4)
@@ -2820,7 +2820,7 @@ class TestSubnetsV2(QuantumDbPluginV2TestCase):
        dns_nameservers = ['1.2.3.4']
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
-                                  admin_status_up=True)
+                                  admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=4,
@@ -2836,7 +2836,7 @@ class TestSubnetsV2(QuantumDbPluginV2TestCase):
                       'nexthop': '1.2.3.4'}]
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
-                                  admin_status_up=True)
+                                  admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=4,
@@ -2853,7 +2853,7 @@ class TestSubnetsV2(QuantumDbPluginV2TestCase):
                       'nexthop': '1.2.3.4'}]
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
-                                  admin_status_up=True)
+                                  admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=4,
diff --git a/quantum/tests/unit/test_l3_plugin.py b/quantum/tests/unit/test_l3_plugin.py
index 1c0c79789..e905b3dc0 100644
--- a/quantum/tests/unit/test_l3_plugin.py
+++ b/quantum/tests/unit/test_l3_plugin.py
@@ -292,7 +292,7 @@ class TestL3NatPlugin(db_base_plugin_v2.QuantumDbPluginV2,
 
 class L3NatDBTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
 
-    def _create_network(self, fmt, name, admin_status_up, **kwargs):
+    def _create_network(self, fmt, name, admin_state_up, **kwargs):
        """ Override the routine for allowing the router:external attribute """
        # attributes containing a colon should be passed with
        # a double underscore
@@ -302,7 +302,7 @@ class L3NatDBTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
            arg_list = (l3.EXTERNAL,)
        return super(L3NatDBTestCase, self)._create_network(fmt,
                                                            name,
-                                                           admin_status_up,
+                                                           admin_state_up,
                                                            arg_list=arg_list,
                                                            **new_args)
 
@@ -380,10 +380,10 @@ class L3NatDBTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
        return self.deserialize(self.fmt, res)
 
    @contextlib.contextmanager
-    def router(self, name='router1', admin_status_up=True,
+    def router(self, name='router1', admin_state_up=True,
               fmt=None, tenant_id=_uuid(), set_context=False):
        router = self._make_router(fmt or self.fmt, tenant_id, name,
-                                  admin_status_up, set_context)
+                                  admin_state_up, set_context)
        try:
            yield router
        finally:
@@ -395,7 +395,7 @@ class L3NatDBTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
        expected_value = [('name', name), ('tenant_id', tenant_id),
                          ('admin_state_up', True), ('status', 'ACTIVE'),
                          ('external_gateway_info', None)]
-        with self.router(name='router1', admin_status_up=True,
+        with self.router(name='router1', admin_state_up=True,
                         tenant_id=tenant_id) as router:
            for k, v in expected_value:
                self.assertEqual(router['router'][k], v)
@@ -938,7 +938,7 @@ class L3NatDBTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
        expected_value = [('name', name), ('tenant_id', tenant_id),
                          ('admin_state_up', True), ('status', 'ACTIVE'),
                          ('external_gateway_info', None)]
-        with self.router(name='router1', admin_status_up=True,
+        with self.router(name='router1', admin_state_up=True,
                         tenant_id=tenant_id) as router:
            res = self._show('routers', router['router']['id'])
            for k, v in expected_value:
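
For reference, a minimal illustrative sketch (not part of the patch) of the request-body shape
the renamed test helpers build: the API attribute has always been 'admin_state_up'; only the
local parameter name was previously misspelled admin_status_up. The standalone helper name
build_network_body below is hypothetical, but the dictionary mirrors _create_network in
quantum/tests/unit/test_db_plugin.py.

    def build_network_body(name, admin_state_up, tenant_id):
        # Same body the test helpers above construct; the keyword argument now
        # matches the API attribute name.
        return {'network': {'name': name,
                            'admin_state_up': admin_state_up,
                            'tenant_id': tenant_id}}

    # Example:
    # build_network_body('net1', True, 'tenant-1')
    # -> {'network': {'name': 'net1', 'admin_state_up': True, 'tenant_id': 'tenant-1'}}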