review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
Fixed a bunch of typos throughout Neutron
author    Brandon Palm <bapalm@us.ibm.com>
Wed, 28 Oct 2015 19:51:25 +0000 (14:51 -0500)
committer Brandon Palm <bapalm@us.ibm.com>
Fri, 30 Oct 2015 13:54:41 +0000 (08:54 -0500)
Went through all of the docstrings in Neutron and did
some cleanup.  I'm sure there are a bunch more that I have missed.

Change-Id: Ib29d2de1c580880c89ed4fd069e1515d0977a3e7

54 files changed:
neutron/agent/l2/extensions/qos.py
neutron/agent/linux/dhcp.py
neutron/agent/linux/ip_lib.py
neutron/agent/linux/pd.py
neutron/common/rpc.py
neutron/common/utils.py
neutron/db/l3_db.py
neutron/db/quota/driver.py
neutron/db/securitygroups_db.py
neutron/db/sqlalchemyutils.py
neutron/ipam/drivers/neutrondb_ipam/db_api.py
neutron/ipam/subnet_alloc.py
neutron/pecan_wsgi/hooks/quota_enforcement.py
neutron/pecan_wsgi/startup.py
neutron/plugins/brocade/NeutronPlugin.py
neutron/plugins/ml2/db.py
neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py
neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py
neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py
neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py
neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py
neutron/plugins/ml2/managers.py
neutron/plugins/ml2/plugin.py
neutron/quota/__init__.py
neutron/services/l3_router/brocade/l3_router_plugin.py
neutron/tests/api/admin/test_agent_management.py
neutron/tests/api/admin/test_shared_network_extension.py
neutron/tests/api/test_fwaas_extensions.py
neutron/tests/api/test_subnetpools_negative.py
neutron/tests/api/test_vpnaas_extensions.py
neutron/tests/common/net_helpers.py
neutron/tests/functional/agent/linux/base.py
neutron/tests/functional/agent/test_l3_agent.py
neutron/tests/functional/db/test_migrations.py
neutron/tests/retargetable/base.py
neutron/tests/retargetable/client_fixtures.py
neutron/tests/tempest/common/credentials.py
neutron/tests/tempest/common/isolated_creds.py
neutron/tests/tempest/common/waiters.py
neutron/tests/unit/agent/l2/extensions/test_qos.py
neutron/tests/unit/agent/linux/test_iptables_firewall.py
neutron/tests/unit/api/test_extensions.py
neutron/tests/unit/api/v2/test_base.py
neutron/tests/unit/db/test_allowedaddresspairs_db.py
neutron/tests/unit/db/test_db_base_plugin_v2.py
neutron/tests/unit/dummy_plugin.py
neutron/tests/unit/extensions/test_l3_ext_gw_mode.py
neutron/tests/unit/extensions/test_portsecurity.py
neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py
neutron/tests/unit/plugins/ml2/test_extension_driver_api.py
neutron/tests/unit/plugins/ml2/test_security_group.py
neutron/tests/unit/quota/test_resource.py
neutron/tests/unit/test_wsgi.py
neutron/wsgi.py

index 7cb9111e29a54f1bcf909cbbd9adc894ef039116..8ff4704f25351c9cd7adabe05d38f6abfedb5be7 100644 (file)
@@ -41,7 +41,7 @@ class QosAgentDriver(object):
     """
 
     # Each QoS driver should define the set of rule types that it supports, and
-    # correspoding handlers that has the following names:
+    # corresponding handlers that has the following names:
     #
     # create_<type>
     # update_<type>
index 7c67f18d6d4efbd227996d84bc5117abe57b5b26..48c04e5deab4ff716a57d78ee08c23cd0787401d 100644 (file)
@@ -577,7 +577,7 @@ class Dnsmasq(DhcpLocalProcess):
             # Even with an infinite lease, a client may choose to renew a
             # previous lease on reboot or interface bounce so we should have
             # an entry for it.
-            # Dnsmasq timestamp format for an infinite lease is  is 0.
+            # Dnsmasq timestamp format for an infinite lease is 0.
             timestamp = 0
         else:
             timestamp = int(time.time()) + self.conf.dhcp_lease_duration
index a4a9d4fef2c4966395b3206df2ccd7edd59d6c87..a86955eef5741868ca0d715ef42750e9c05cdc5f 100644 (file)
@@ -320,7 +320,7 @@ class IpRuleCommand(IpCommandBase):
 
     @staticmethod
     def _make_canonical(ip_version, settings):
-        """Converts settings to a canonical represention to compare easily"""
+        """Converts settings to a canonical representation to compare easily"""
         def canonicalize_fwmark_string(fwmark_mask):
             """Reformats fwmark/mask in to a canonical form
 
index cfed4936f1b25349d54589c6b49182777cfa7dfe..e9be82a37c68b08fe1e155d4b011ad259fee92f7 100644 (file)
@@ -197,7 +197,7 @@ class PrefixDelegation(object):
                                            router['ns_name'],
                                            'link')
             # There is a delay before the LLA becomes active.
-            # This is because the kernal runs DAD to make sure LLA uniqueness
+            # This is because the kernel runs DAD to make sure LLA uniqueness
             # Spawn a thread to wait for the interface to be ready
             self._spawn_lla_thread(router['gw_interface'],
                                    router['ns_name'],
index 3037f5342f000f8ae1b31e6e4ec12ae318008e14..6d37c28ba06a4c472baf21f58569c13385c5432b 100644 (file)
@@ -221,7 +221,7 @@ class VoidConnection(object):
 
 # functions
 def create_connection(new=True):
-    # NOTE(salv-orlando): This is a clever interpreation of the factory design
+    # NOTE(salv-orlando): This is a clever interpretation of the factory design
     # patter aimed at preventing plugins from initializing RPC servers upon
     # initialization when they are running in the REST over HTTP API server.
     # The educated reader will perfectly be able that this a fairly dirty hack
index 63d83ab06618954386100f6315160d4001002ec7..f0b57e3793c799c33e792cfb97ffbd173f60a63c 100644 (file)
@@ -327,7 +327,7 @@ def get_random_string(length):
 
 def get_dhcp_agent_device_id(network_id, host):
     # Split host so as to always use only the hostname and
-    # not the domain name. This will guarantee consistentcy
+    # not the domain name. This will guarantee consistency
     # whether a local hostname or an fqdn is passed in.
     local_hostname = host.split('.')[0]
     host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, str(local_hostname))
index 91245359977e10d2001115113264381f22ef0aa9..0f01040f5da257e19679d0fbacd7af166863f09a 100644 (file)
@@ -109,7 +109,7 @@ class FloatingIP(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
     router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'))
     # Additional attribute for keeping track of the router where the floating
     # ip was associated in order to be able to ensure consistency even if an
-    # aysnchronous backend is unavailable when the floating IP is disassociated
+    # asynchronous backend is unavailable when the floating IP is disassociated
     last_known_router_id = sa.Column(sa.String(36))
     status = sa.Column(sa.String(16))
     router = orm.relationship(Router, backref='floating_ips')
index cc7a5425ddf1cfee75c472ab1e83d46d59238068..f61b1ada630876203f9b2745e5d4817c6a80f203 100644 (file)
@@ -82,7 +82,8 @@ class DbQuotaDriver(object):
         for quota in context.session.query(quota_models.Quota):
             tenant_id = quota['tenant_id']
 
-            # avoid setdefault() because only want to copy when actually req'd
+            # avoid setdefault() because only want to copy when actually
+            # required
             tenant_quota = all_tenant_quotas.get(tenant_id)
             if tenant_quota is None:
                 tenant_quota = tenant_default.copy()
@@ -148,9 +149,9 @@ class DbQuotaDriver(object):
         # concurrent reservations.
         # For this reason it might be advisable to handle contention using
         # this kind of locks and paying the cost of a write set certification
-        # failure when a mysql galera cluster is employed. Also, this class of
+        # failure when a MySQL Galera cluster is employed. Also, this class of
         # locks should be ok to use when support for sending "hotspot" writes
-        # to a single node will be avaialable.
+        # to a single node will be available.
         requested_resources = deltas.keys()
         with db_api.autonested_transaction(context.session):
             # get_tenant_quotes needs in input a dictionary mapping resource
@@ -179,7 +180,7 @@ class DbQuotaDriver(object):
                     context, plugin, tenant_id, resync_usage=False)) for
                 resource in requested_resources)
             # Adjust for expired reservations. Apparently it is cheaper than
-            # querying everytime for active reservations and counting overall
+            # querying every time for active reservations and counting overall
             # quantity of resources reserved
             expired_deltas = quota_api.get_reservations_for_resources(
                 context, tenant_id, requested_resources, expired=True)
@@ -211,7 +212,7 @@ class DbQuotaDriver(object):
 
     def commit_reservation(self, context, reservation_id):
         # Do not mark resource usage as dirty. If a reservation is committed,
-        # then the releveant resources have been created. Usage data for these
+        # then the relevant resources have been created. Usage data for these
         # resources has therefore already been marked dirty.
         quota_api.remove_reservation(context, reservation_id,
                                      set_dirty=False)
index db06af0c7fd37af94ff488184f9e195cf7aaeb21..f93e89ce451518897f61ae3e22700e39ae6bdf92 100644 (file)
@@ -636,7 +636,7 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
             **kwargs)
 
     def _extend_port_dict_security_group(self, port_res, port_db):
-        # Security group bindings will be retrieved from the sqlalchemy
+        # Security group bindings will be retrieved from the SQLAlchemy
         # model. As they're loaded eagerly with ports because of the
         # joined load they will not cause an extra query.
         security_group_ids = [sec_group_mapping['security_group_id'] for
index 466cb3fd961b44142394360a4f20cfbe5a8c3ab8..3cdb9ffa74bbab3e261410d298d202db50373ad9 100644 (file)
@@ -69,7 +69,7 @@ def paginate_query(query, model, limit, sorts, marker_obj=None):
             sort_key_attr = getattr(model, sort_key)
         except AttributeError:
             # Extension attribute doesn't support for sorting. Because it
-            # existed in attr_info, it will be catched at here
+            # existed in attr_info, it will be caught here
             msg = _("%s is invalid attribute for sort_key") % sort_key
             raise n_exc.BadRequest(resource=model.__tablename__, msg=msg)
         if isinstance(sort_key_attr.property, properties.RelationshipProperty):
index 223fb1c3484441660fd068aab51097ab7dff1f20..58bb8367afb52b6b67579f9579edf11144e852c7 100644 (file)
@@ -152,7 +152,7 @@ class IpamSubnetManager(object):
 
     def create_range(self, session, allocation_pool_id,
                      range_start, range_end):
-        """Create an availabilty range for a given pool.
+        """Create an availability range for a given pool.
 
         This method does not perform any validation on parameters; it simply
         persist data on the database.
index 29c8a970e010119e8df45724ebc4586e0ee1e87b..9711e89f8ff6370a96e554c1e14b7173421adb2f 100644 (file)
@@ -58,7 +58,7 @@ class SubnetAllocator(driver.Pool):
 
         # NOTE(cbrandily): the update disallows 2 concurrent subnet allocation
         # to succeed: at most 1 transaction will succeed, others will be
-        # rollbacked and be caught in neutron.db.v2.base
+        # rolled back and be caught in neutron.db.v2.base
         query = self._context.session.query(models_v2.SubnetPool).filter_by(
             id=self._subnetpool['id'], hash=current_hash)
         count = query.update({'hash': new_hash})
index e1e4a0b6104503f004daa6afea836847f0d8f566..b6b04e3b5fa808da9aabf5dd8bd80f99ebc9aa8a 100644 (file)
@@ -29,7 +29,7 @@ class QuotaEnforcementHook(hooks.PecanHook):
     priority = 130
 
     def before(self, state):
-        # TODO(salv-orlando): This hook must go when adaptin the pecan code to
+        # TODO(salv-orlando): This hook must go when adapting the pecan code to
         # use reservations.
         if state.request.method != 'POST':
             return
index bdb87e2aedf8a559cb8e633178858e8dd373595e..5bf259c6d57e0378c35bc465661b8b6ceba32b05 100644 (file)
@@ -104,7 +104,7 @@ def initialize_all():
     # NOTE(salv-orlando): If you are care about code quality, please read below
     # Hackiness is strong with the piece of code below. It is used for
     # populating resource plurals and registering resources with the quota
-    # engine, but the method it calls were not coinceived with this aim.
+    # engine, but the method it calls were not conceived with this aim.
     # Therefore it only leverages side-effects from those methods. Moreover,
     # as it is really not advisable to load an instance of
     # neutron.api.v2.router.APIRouter just to register resources with the
index 306f84a28aec5e6e02f305de0003fbd3f66a9047..426f00c614dbf7492641a687cc83053d78c55542 100644 (file)
@@ -17,7 +17,7 @@
 # TODO(shiv) need support for security groups
 
 
-"""Implentation of Brocade Neutron Plugin."""
+"""Implementation of Brocade Neutron Plugin."""
 
 from oslo_config import cfg
 from oslo_log import log as logging
index 385167d40daaa8e0efd9a8363292f70b242ff84a..a18319c7eeb6427d179818af072019bb0d7fd4fd 100644 (file)
@@ -227,7 +227,7 @@ def delete_dvr_port_binding_if_stale(session, binding):
 
 
 def get_port(session, port_id):
-    """Get port record for update within transcation."""
+    """Get port record for update within transaction."""
 
     with session.begin(subtransactions=True):
         try:
index a9953dedcad736119e42f3f29a47ed9a94d77c87..3d146054b2682b3634934e63774a172ada216a6f 100644 (file)
@@ -14,7 +14,7 @@
 #    under the License.
 
 
-"""Implentation of Brocade ML2 Mechanism driver for ML2 Plugin."""
+"""Implementation of Brocade ML2 Mechanism driver for ML2 Plugin."""
 
 from oslo_config import cfg
 
index f648c1d93243a78ca0a35b2e4891f9624010d953..e45c8b83f1a6a62493244b9b06a808cf53c0c211 100644 (file)
@@ -56,7 +56,7 @@ def setup_arp_spoofing_protection(vif, port_details):
 
 
 def chain_name(vif):
-    # start each chain with a common identifer for cleanup to find
+    # start each chain with a common identifier for cleanup to find
     return '%s%s' % (SPOOF_CHAIN_PREFIX, vif)
 
 
index f497c57663c06c0131fa301ebc64176cd33dfce7..65120515617cd34e4e41bb0cf2219087de247e72 100644 (file)
@@ -1030,7 +1030,7 @@ class LinuxBridgeNeutronAgentRPC(service.Service):
     def scan_devices(self, previous, sync):
         device_info = {}
 
-        # Save and reinitialise the set variable that the port_update RPC uses.
+        # Save and reinitialize the set variable that the port_update RPC uses.
         # This should be thread-safe as the greenthread should not yield
         # between these two statements.
         updated_devices = self.updated_devices
index f69b5d6a90a1412bcc5a3e298681bd604828844a..b897a4cac59335c0ab596e0365da39a0aea4e30a 100644 (file)
@@ -34,7 +34,7 @@ cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
 # that subnet
 class LocalDVRSubnetMapping(object):
     def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID):
-        # set of commpute ports on on this dvr subnet
+        # set of compute ports on this dvr subnet
         self.compute_ports = {}
         self.subnet = subnet
         self.csnat_ofport = csnat_ofport
index 085c093ed97e46a17075c4609f7893e2e867eae6..4ff2d23677991419bcbb9424ead68b931b6b1199 100644 (file)
@@ -556,7 +556,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
                 br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
                                         lvm.tun_ofports)
             else:
-                # This local vlan doesn't require any more tunnelling
+                # This local vlan doesn't require any more tunneling
                 br.delete_flood_to_tun(lvm.vlan)
         else:
             self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan,
@@ -1297,7 +1297,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
 
         self.tun_br_ofports[tunnel_type][remote_ip] = ofport
         # Add flow in default table to resubmit to the right
-        # tunnelling table (lvid will be set in the latter)
+        # tunneling table (lvid will be set in the latter)
         br.setup_tunnel_port(tunnel_type, ofport)
 
         ofports = self.tun_br_ofports[tunnel_type].values()
index 89d742c67b1dd956022f6fb764a33fadd953597c..bc72255e1f70e9d450dcd0cbd3ed5dc63a818d1a 100644 (file)
@@ -749,7 +749,7 @@ class MechanismManager(stevedore.named.NamedExtensionManager):
         # To prevent a possible binding loop, don't try to bind with
         # this driver if the same driver has already bound at a higher
         # level to one of the segments we are currently trying to
-        # bind. Note that is is OK for the same driver to bind at
+        # bind. Note that it is OK for the same driver to bind at
         # multiple levels using different segments.
         for level in binding_levels:
             if (level.driver == driver and
index e7e0b9dc20139d92af0a591e8f17b27d72271808..a2ee1de6aace8b4cb15ba2935f96e8053a369706 100644 (file)
@@ -1238,7 +1238,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         if original_port['admin_state_up'] != updated_port['admin_state_up']:
             need_port_update_notify = True
         # NOTE: In the case of DVR ports, the port-binding is done after
-        # router scheduling when sync_routers is callede and so this call
+        # router scheduling when sync_routers is called and so this call
         # below may not be required for DVR routed interfaces. But still
         # since we don't have the mech_context for the DVR router interfaces
         # at certain times, we just pass the port-context and return it, so
index df54d9f9128b432f959935c18f2809ff449a618f..e5a56009bcd4dbb8cfa25ada0d4b0626eb227d44 100644 (file)
@@ -175,10 +175,10 @@ class ConfDriver(object):
         return quota_api.ReservationInfo('fake', None, None, None)
 
     def commit_reservation(self, context, reservation_id):
-        """Tnis is a noop as this driver does not support reservations."""
+        """This is a noop as this driver does not support reservations."""
 
     def cancel_reservation(self, context, reservation_id):
-        """Tnis is a noop as this driver does not support reservations."""
+        """This is a noop as this driver does not support reservations."""
 
 
 class QuotaEngine(object):
index c4b3a678097503456321f6f3d6a0bc63a3221b1c..7ef000cbdcccdc87d42ced6424b654d03dfc1232 100644 (file)
@@ -15,7 +15,7 @@
 #
 
 
-"""Implentation of Brocade SVI service Plugin."""
+"""Implementation of Brocade SVI service Plugin."""
 
 from oslo_config import cfg
 
index 4136c3e992a7d6a5974bc0233348886c163b7a92..228fdc63025ba8d6574d7dc40b8a16e2833d5efc 100644 (file)
@@ -35,7 +35,7 @@ class AgentManagementTestJSON(base.BaseAdminNetworkTest):
     def test_list_agent(self):
         body = self.admin_client.list_agents()
         agents = body['agents']
-        # Hearthbeats must be excluded from comparison
+        # Heartbeats must be excluded from comparison
         self.agent.pop('heartbeat_timestamp', None)
         self.agent.pop('configurations', None)
         for agent in agents:
index 1979e25622a27abf797d944b247abfd629cdd6ff..8a30c6a9bdcd2654791d6f2276dd0a1d04138177 100644 (file)
@@ -253,11 +253,11 @@ class RBACSharedNetworksTest(base.BaseAdminNetworkTest):
             action='access_as_shared', target_tenant='*')['rbac_policy']
         self.admin_client.delete_rbac_policy(res['policy']['id'])
 
-        # now that wilcard is the only remainin, it should be subjected to
+        # now that wildcard is the only remaining, it should be subjected to
         # to the same restriction
         with testtools.ExpectedException(lib_exc.Conflict):
             self.admin_client.delete_rbac_policy(wild['id'])
-        # similarily, we can't update the policy to a different tenant
+        # similarly, we can't update the policy to a different tenant
         with testtools.ExpectedException(lib_exc.Conflict):
             self.admin_client.update_rbac_policy(
                 wild['id'], target_tenant=self.client2.tenant_id)
index 3755196fd981067991c20f7a1f720075ff477770..55745c4cf403526f7bcd2347de174bc3d41c1c84 100644 (file)
@@ -331,7 +331,7 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest):
         self.client.insert_firewall_rule_in_policy(
             fw_policy_id, fw_rule_id2, fw_rule_id1, '')
 
-        # Verify the posiition of rule after insertion
+        # Verify the position of rule after insertion
         fw_rule = self.client.show_firewall_rule(
             fw_rule_id2)
 
@@ -342,7 +342,7 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest):
         # Insert rule to firewall policy before the first rule
         self.client.insert_firewall_rule_in_policy(
             fw_policy_id, fw_rule_id2, '', fw_rule_id1)
-        # Verify the posiition of rule after insertion
+        # Verify the position of rule after insertion
         fw_rule = self.client.show_firewall_rule(
             fw_rule_id2)
         self.assertEqual(int(fw_rule['firewall_rule']['position']), 1)
@@ -380,7 +380,7 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest):
         fw_policy_id = body['firewall_policy']['id']
         self.addCleanup(self._try_delete_policy, fw_policy_id)
         self.assertFalse(body['firewall_policy']['audited'])
-        # Update firewall policy audited attribute to ture
+        # Update firewall policy audited attribute to true
         self.client.update_firewall_policy(fw_policy_id,
                                            audited=True)
         # Insert Firewall rule to firewall policy
index 76e9ff51a474aaeda8d77640998eaca6f76e16fb..bb00d3a42fb16b4c736c5d3f98163224ead46a9f 100644 (file)
@@ -251,7 +251,7 @@ class SubnetPoolsNegativeTestJSON(base.BaseNetworkTest):
 
         self.addCleanup(self.client.delete_subnetpool, pool_id_2)
 
-        # now update the pool_id_1 with the prefix interesecting with
+        # now update the pool_id_1 with the prefix intersecting with
         # pool_id_2
         subnetpool_data = {'subnetpool': {'prefixes':
                                           pool_1_updated_prefixes}}
index 6076e52bfb61d2c8f3be516b16a728c167aecf94..e64545434d0b3feea4cdccb67aae38ceb1bc44b3 100644 (file)
@@ -96,7 +96,7 @@ class VPNaaSTestJSON(base.BaseAdminNetworkTest):
         Returns the tenant_id of the client current user
         """
         # TODO(jroovers) This is a temporary workaround to get the tenant_id
-        # of the the current client. Replace this once tenant_isolation for
+        # of the current client. Replace this once tenant_isolation for
         # neutron is fixed.
         body = self.client.show_network(self.network['id'])
         return body['network']['tenant_id']
index 7750ecc7e273e919bfa299615465e370d9705152..fe4abdf46e13b6c4997daac551e0583828c34bb0 100644 (file)
@@ -325,7 +325,7 @@ class NetcatTester(object):
             address=self.address)
         if self.protocol == self.UDP:
             # Create an ASSURED entry in conntrack table for UDP packets,
-            # that requires 3-way communcation
+            # that requires 3-way communication
             # 1st transmission creates UNREPLIED
             # 2nd transmission removes UNREPLIED
             # 3rd transmission creates ASSURED
index cb3b63046e213b682f1dd1d38a8a986021df136b..8ed1d47be5da9a6cab10c34a6623a288b1289993 100644 (file)
@@ -31,7 +31,7 @@ get_rand_name = tests_base.get_rand_name
 
 
 # Regarding MRO, it goes BaseOVSLinuxTestCase, WithScenarios,
-# BaseSudoTestCase, ..., UnitTest, object. setUp is not dfined in
+# BaseSudoTestCase, ..., UnitTest, object. setUp is not defined in
 # WithScenarios, so it will correctly be found in BaseSudoTestCase.
 class BaseOVSLinuxTestCase(testscenarios.WithScenarios, base.BaseSudoTestCase):
     scenarios = [
index 1e1e8fa05496f044d097a17c1f1e9d839f900d63..ae0ad6cf072f34865dbc7a5acb7b4891d641d9c6 100644 (file)
@@ -565,7 +565,7 @@ class L3AgentTestCase(L3AgentTestFramework):
         # clear agent router_info as it will be after restart
         self.agent.router_info = {}
 
-        # Synchonize the agent with the plug-in
+        # Synchronize the agent with the plug-in
         with mock.patch.object(namespace_manager.NamespaceManager, 'list_all',
                                return_value=ns_names_to_retrieve):
             self.agent.periodic_sync_routers_task(self.agent.context)
@@ -1402,7 +1402,7 @@ class TestDvrRouter(L3AgentTestFramework):
 
     def test_dvr_router_add_internal_network_set_arp_cache(self):
         # Check that, when the router is set up and there are
-        # existing ports on the the uplinked subnet, the ARP
+        # existing ports on the uplinked subnet, the ARP
         # cache is properly populated.
         self.agent.conf.agent_mode = 'dvr_snat'
         router_info = l3_test_common.prepare_router_data()
index 83047f2dd28818c1e23deb3fe227f568a74e23f5..b46f24931833b2146f4132823273bbd8e64972dc 100644 (file)
@@ -256,7 +256,7 @@ class TestModelsMigrationsMysql(_TestModelsMigrations,
             migration.do_alembic_command(self.alembic_config, 'upgrade',
                                          'heads')
             insp = sqlalchemy.engine.reflection.Inspector.from_engine(engine)
-            # Test that table creation on mysql only builds InnoDB tables
+            # Test that table creation on MySQL only builds InnoDB tables
             tables = insp.get_table_names()
             self.assertTrue(len(tables) > 0,
                             "No tables found. Wrong schema?")
index 98d9d1f854710d62671c600673c142c3fe8c8d33..6b5c5f0a9b0e0fbe76f6bbe4ff1b8a694e769abf 100644 (file)
@@ -60,7 +60,7 @@ def get_plugin_scenarios():
 def get_scenarios():
     if rest_enabled():
         # FIXME(marun) Remove local import once tempest config is safe
-        # to import alonside neutron config
+        # to import alongside neutron config
         from neutron.tests.retargetable import rest_fixture
         return [('tempest', {'client': rest_fixture.RestClientFixture()})]
     else:
index 102338eceebe5d801222276a4ff0c89d13f032e2..6c4efb75072adcb4e45192c42b6e5dc623499c8d 100644 (file)
@@ -93,7 +93,7 @@ class PluginClientFixture(AbstractClientFixture):
 
     def create_network(self, **kwargs):
         # Supply defaults that are expected to be set by the api
-        # framwork
+        # framework
         kwargs.setdefault('admin_state_up', True)
         kwargs.setdefault('shared', False)
         data = dict(network=kwargs)
index 9ae6ee8bd014cedf9d731ec7b98c88cc777feee2..9dfdff0d037cbf2575adf4e81de4a4a60c04227d 100644 (file)
@@ -45,7 +45,7 @@ def get_isolated_credentials(name, network_resources=None,
 
 # We want a helper function here to check and see if admin credentials
 # are available so we can do a single call from skip_checks if admin
-# creds arevailable.
+# creds are available.
 def is_admin_available():
     is_admin = True
     # If tenant isolation is enabled admin will be available
index 5da24a922642b799d04edfc9fee38cd376d9b251..163ce8aded369f04e45adf4a0f20e9fa7b46263a 100644 (file)
@@ -297,7 +297,7 @@ class IsolatedCreds(cred_provider.CredentialProvider):
             new_index = str(roles) + '-' + str(len(self.isolated_creds))
             self.isolated_creds[new_index] = exist_creds
             del self.isolated_creds[str(roles)]
-            # Handle isolated neutron resouces if they exist too
+            # Handle isolated neutron resources if they exist too
             if CONF.service_available.neutron:
                 exist_net = self.isolated_net_resources.get(str(roles))
                 if exist_net:
index 7d97d5d06fd91d2bb887420035bc14c262c4463f..caa9b37b141ff904f4c81df3fab35c9cb2654e45 100644 (file)
@@ -50,7 +50,7 @@ def wait_for_server_status(client, server_id, status, ready_wait=True,
                     return
                 # NOTE(afazekas): The instance is in "ready for action state"
                 # when no task in progress
-                # NOTE(afazekas): Converted to string bacuse of the XML
+                # NOTE(afazekas): Converted to string because of the XML
                 # responses
                 if str(task_state) == "None":
                     # without state api extension 3 sec usually enough
index 99d31c5d0354cc7c43a479594b22e1a0de6b5db7..d0a6206f0bf2ebd532cfa21bcd41305934f25bbb 100755 (executable)
@@ -154,7 +154,7 @@ class QosExtensionRpcTestCase(QosExtensionBaseTestCase):
         qos_policy_id = port['qos_policy_id']
         port_id = port['port_id']
         self.qos_ext.handle_port(self.context, port)
-        # we make sure the underlaying qos driver is called with the
+        # we make sure the underlying qos driver is called with the
         # right parameters
         self.qos_ext.qos_driver.create.assert_called_once_with(
             port, TEST_POLICY)
index 82307ad0814cfde0b807b119412cb01fb0317bb2..cdc9b6151aaf50b0e79e2d7e91da7b22c84fcf52 100644 (file)
@@ -1299,7 +1299,7 @@ class IptablesFirewallTestCase(BaseIptablesFirewallTestCase):
     def test_remove_unknown_port(self):
         port = self._fake_port()
         self.firewall.remove_port_filter(port)
-        # checking no exception occures
+        # checking no exception occurs
         self.assertFalse(self.v4filter_inst.called)
 
     def test_defer_apply(self):
@@ -1857,7 +1857,7 @@ class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase):
     def setUp(self):
         super(OVSHybridIptablesFirewallTestCase, self).setUp()
         self.firewall = iptables_firewall.OVSHybridIptablesFirewallDriver()
-        # inital data has 1, 2, and 9 in use, see RAW_TABLE_OUTPUT above.
+        # initial data has 1, 2, and 9 in use, see RAW_TABLE_OUTPUT above.
         self._dev_zone_map = {'61634509-31': 2, '8f46cf18-12': 9,
                               '95c24827-02': 2, 'e804433b-61': 1}
 
@@ -1865,7 +1865,7 @@ class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase):
         self.assertEqual(self._dev_zone_map, self.firewall._device_zone_map)
 
     def test__generate_device_zone(self):
-        # inital data has 1, 2, and 9 in use.
+        # initial data has 1, 2, and 9 in use.
         # we fill from top up first.
         self.assertEqual(10, self.firewall._generate_device_zone('test'))
 
index c8fea09c9870ddb756a7f9c6074999d96f97934f..4fa31c404819e53d2d14de62b9d6e1fc345004f1 100644 (file)
@@ -81,7 +81,7 @@ class ExtensionPathTest(base.BaseTestCase):
                          '%s:neutron/tests/unit/extensions' % self.base_path)
 
     def test_get_extensions_path_no_extensions(self):
-        # Reset to default value, as it's overriden by base class
+        # Reset to default value, as it's overridden by base class
         cfg.CONF.set_override('api_extensions_path', '')
         path = extensions.get_extensions_path()
         self.assertEqual(path, self.base_path)
index 8507df9f91f272ed439839ed26521bf1d742b211..88c2871c3293e6dcdfa65f85db2edb1f81c91de1 100644 (file)
@@ -1390,7 +1390,7 @@ class QuotaTest(APIv2TestBase):
         super(QuotaTest, self).setUp()
         # Use mock to let the API use a different QuotaEngine instance for
         # unit test in this class. This will ensure resource are registered
-        # again and instanciated with neutron.quota.resource.CountableResource
+        # again and instantiated with neutron.quota.resource.CountableResource
         replacement_registry = resource_registry.ResourceRegistry()
         registry_patcher = mock.patch('neutron.quota.resource_registry.'
                                       'ResourceRegistry.get_instance')
index ecf2670f6213bb06a724b5d50f0293c899ea8b25..c62246521eb05146ea459dc7592fd8080d35040d 100644 (file)
@@ -80,7 +80,7 @@ class AllowedAddressPairTestPlugin(portsecurity_db.PortSecurityDbMixin,
             ret_port.update(port['port'])
 
             if (delete_addr_pairs or has_addr_pairs):
-                # delete address pairds and readd them
+                # delete address pairs and readd them
                 self._delete_allowed_address_pairs(context, id)
                 self._process_create_allowed_address_pairs(
                     context, ret_port,
index 6d57248dc44a4fe8c958dcfca388013a340ea24b..4453049b4e95c74f05f90d17967668b06519546b 100644 (file)
@@ -118,7 +118,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
         cfg.CONF.set_override('allow_pagination', True)
         cfg.CONF.set_override('allow_sorting', True)
         self.api = router.APIRouter()
-        # Set the defualt status
+        # Set the default status
         self.net_create_status = 'ACTIVE'
         self.port_create_status = 'ACTIVE'
 
index c658683d15be96c51a4507f3012e556bcaa05b71..f050a81a48f43550fb4cf2f2aeff86004ea334ce 100644 (file)
@@ -80,7 +80,7 @@ class Dummy(object):
 
 
 class DummyServicePlugin(service_base.ServicePluginBase):
-    """This is a simple plugin for managing instantes of a fictional 'dummy'
+    """This is a simple plugin for managing instances of a fictional 'dummy'
         service. This plugin is provided as a proof-of-concept of how
         advanced service might leverage the service type extension.
         Ideally, instances of real advanced services, such as load balancing
index ae811d7ae5ff28c77b2844ddf5118791d31b199c..83f2d13c4f2b525c6b9c8ad17882df835d4c960b 100644 (file)
@@ -130,7 +130,7 @@ class TestL3GwModeMixin(testlib_api.SqlTestCase):
         self.net_ext = external_net_db.ExternalNetwork(
             network_id=self.ext_net_id)
         self.context.session.add(self.network)
-        # The following is to avoid complains from sqlite on
+        # The following is to avoid complaints from SQLite on
         # foreign key violations
         self.context.session.flush()
         self.context.session.add(self.net_ext)
index c307d771e00588717cc2ac989ed2ce48faeaa0b7..ed3deb8d39c78ffa7c98c979935098581016152c 100644 (file)
@@ -267,7 +267,7 @@ class TestPortSecurity(PortSecurityDBTestCase):
     def test_create_port_with_security_group_and_net_sec_false(self):
         # This tests that port_security_enabled is true when creating
         # a port on a network that is marked as port_security_enabled=False
-        # that has a subnet and securiy_groups are passed it.
+        # that has a subnet and security_groups are passed it.
         if self._skip_security_group:
             self.skipTest("Plugin does not support security groups")
         res = self._create_network('json', 'net1', True,
index 5a3f6d6e9cb8df18e7d088debd8fa904cb16d8da..fb9eedadf3000b0e6e0827774243bccdcb247048 100644 (file)
@@ -201,7 +201,7 @@ class TestNeutronDbIpamPool(testlib_api.SqlTestCase,
 
 class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase,
                               TestNeutronDbIpamMixin):
-    """Test case for Subnet interface for Nuetron's DB IPAM driver.
+    """Test case for Subnet interface for Neutron's DB IPAM driver.
 
     This test case exercises the reference IPAM driver.
     Even if it loads a plugin, the unit tests in this class do not exercise
index 78d635338581b6c57207ac1d96e39b3465888ece..7faa5e138b2489ae75741a200152eece4e18de7f 100644 (file)
@@ -200,7 +200,7 @@ class DBExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
             val = res['networks'][0]['network_extension']
             self.assertEqual("", val)
 
-        # Test create with explict value.
+        # Test create with explicit value.
         res = self._create_network(self.fmt,
                                    'test-network', True,
                                    arg_list=('network_extension', ),
@@ -238,7 +238,7 @@ class DBExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
             self.assertEqual("", val)
 
         with self.network() as network:
-            # Test create with explict value.
+            # Test create with explicit value.
             data = {'subnet':
                     {'network_id': network['network']['id'],
                      'cidr': '10.1.0.0/24',
@@ -280,7 +280,7 @@ class DBExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
             self.assertEqual("", val)
 
         with self.network() as network:
-            # Test create with explict value.
+            # Test create with explicit value.
             res = self._create_port(self.fmt,
                                     network['network']['id'],
                                     arg_list=('port_extension', ),
index a9b92201371223923bc5cd49e941906cc775710d..b1f1e5d8e3f06d383f76f2a554d3350c1d1585aa 100644 (file)
@@ -136,7 +136,7 @@ class TestMl2SecurityGroups(Ml2SecurityGroupsTestCase,
     def test_full_uuids_skip_port_id_lookup(self):
         plugin = manager.NeutronManager.get_plugin()
         # when full UUIDs are provided, the _or statement should only
-        # have one matching 'IN' critiera for all of the IDs
+        # have one matching 'IN' criteria for all of the IDs
         with mock.patch('neutron.plugins.ml2.db.or_') as or_mock,\
                 mock.patch('sqlalchemy.orm.Session.query') as qmock:
             fmock = qmock.return_value.outerjoin.return_value.filter
index e45ccc59e8999344ef15b610d4dd3d3a272b5a45..2811e3f877400834bb6a47f9022c9121acf0fc6a 100644 (file)
@@ -186,7 +186,7 @@ class TestTrackedResource(testlib_api.SqlTestCaseLight):
     def test_add_delete_data_triggers_event(self):
         res = self._create_resource()
         other_res = self._create_other_resource()
-        # Validate dirty tenants since mock does not work well with sqlalchemy
+        # Validate dirty tenants since mock does not work well with SQLAlchemy
         # event handlers.
         self._add_data()
         self._add_data('someone_else')
index 190299967777a93a46770c2575763990740c8d99..02166549e72a089f3ad7875d06b9662966f02a82 100644 (file)
@@ -38,7 +38,7 @@ TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
 
 def open_no_proxy(*args, **kwargs):
     # NOTE(jamespage):
-    # Deal with more secure certification chain verficiation
+    # Deal with more secure certification chain verification
     # introduced in python 2.7.9 under PEP-0476
     # https://github.com/python/peps/blob/master/pep-0476.txt
     if hasattr(ssl, "_create_unverified_context"):
index eb5805fdacb9ed13991cbd1bbd9d2bbb75955a75..9fe21fbcb7dff0ccc310ec1526b2217300b43a82 100644 (file)
@@ -742,7 +742,7 @@ class Controller(object):
             raise webob.exc.HTTPNotAcceptable(msg)
 
     def _deserialize(self, data, content_type):
-        """Deserialize the request body to the specefied content type.
+        """Deserialize the request body to the specified content type.
 
         Uses self._serialization_metadata if it exists, which is a dict mapping
         MIME types to information needed to serialize to that type.