review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Set pep8 version to 1.1 in test_requires
author: John Griffith <john.griffith@solidfire.com>
Fri, 1 Jun 2012 16:46:14 +0000 (10:46 -0600)
committer: John Griffith <john.griffith@solidfire.com>
Fri, 1 Jun 2012 17:39:45 +0000 (11:39 -0600)
* Fixes bug 1007518
* Changes in pep8 cause new failures
* Fix up the ones we found anyway

Change-Id: I5cd73a252f73893e4672a2e39b667c519423ae3f

13 files changed:
cinder/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
cinder/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py
cinder/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
cinder/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py
cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py
cinder/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py
cinder/db/sqlalchemy/migrate_repo/versions/038_add_uuid_to_virtual_interfaces.py
cinder/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py
cinder/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py
cinder/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py
cinder/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py
tools/hacking.py
tools/test-requires

index da01940bd39d33d6a2f3afd5378cfcaf0424483f..e827b04cb2f6df6e1c5b36652887f882f2acc0b4 100644 (file)
@@ -31,8 +31,8 @@ def upgrade(migrate_engine):
                                       _warn_on_bytestring=False),
                                nullable=True)
     instances.create_column(instances_os_type)
-    migrate_engine.execute(instances.update()\
-                           .where(instances.c.os_type == None)\
+    migrate_engine.execute(instances.update()
+                           .where(instances.c.os_type is None)
                            .values(os_type='linux'))
 
 
index b363caca5e4df2dbd224f98739f3d44faf407a44..aa84de236c49ae0f2e322ac9fba1326bd654f3c2 100644 (file)
@@ -40,8 +40,8 @@ def upgrade(migrate_engine):
         type_names[row[0]] = row[1]
 
     for type_id, type_name in type_names.iteritems():
-        migrate_engine.execute(instances.update()\
-            .where(instances.c.instance_type == type_name)\
+        migrate_engine.execute(instances.update()
+            .where(instances.c.instance_type == type_name)
             .values(instance_type_id=type_id))
 
     instances.c.instance_type.drop()
@@ -67,8 +67,8 @@ def downgrade(migrate_engine):
         type_names[row[0]] = row[1]
 
     for type_id, type_name in type_names.iteritems():
-        migrate_engine.execute(instances.update()\
-            .where(instances.c.instance_type_id == type_id)\
+        migrate_engine.execute(instances.update()
+            .where(instances.c.instance_type_id == type_id)
             .values(instance_type=type_name))
 
     instances.c.instance_type_id.drop()
index 1c7081c4ad6e8ba2f6b88b13e709dadb7a72bf36..a32fbdcee80e6fb148b20a58db0e73b907ecb4ce 100644 (file)
@@ -90,7 +90,7 @@ def _assert_no_duplicate_project_ids(quotas):
 
 def assert_old_quotas_have_no_active_duplicates(migrate_engine, quotas):
     """Ensure that there are no duplicate non-deleted quota entries."""
-    select = quotas.select().where(quotas.c.deleted == False)
+    select = quotas.select().where(quotas.c.deleted is False)
     results = migrate_engine.execute(select)
     _assert_no_duplicate_project_ids(list(results))
 
@@ -99,7 +99,7 @@ def assert_new_quotas_have_no_active_duplicates(migrate_engine, quotas):
     """Ensure that there are no duplicate non-deleted quota entries."""
     for resource in resources:
         select = quotas.select().\
-                where(quotas.c.deleted == False).\
+                where(quotas.c.deleted is False).\
                 where(quotas.c.resource == resource)
         results = migrate_engine.execute(select)
         _assert_no_duplicate_project_ids(list(results))
index 313cb16de2c2dac84e9d370b512c066e478d08fc..e0de0176102ab6f129cb65515b21623ce5317235 100644 (file)
@@ -31,8 +31,8 @@ def upgrade(migrate_engine):
     rows = migrate_engine.execute(instances.select())
     for row in rows:
         instance_uuid = str(utils.gen_uuid())
-        migrate_engine.execute(instances.update()\
-                .where(instances.c.id == row[0])\
+        migrate_engine.execute(instances.update()
+                .where(instances.c.id == row[0])
                 .values(uuid=instance_uuid))
 
 
index a34baa83d4cb04bfcaae392a4a7bd30ac24da8a5..31d4950c0b93151e57f404ac20d6975e7d3b7f4e 100644 (file)
@@ -103,7 +103,7 @@ def upgrade(migrate_engine):
 
     # populate the fixed_ips virtual_interface_id column
     s = select([fixed_ips.c.id, fixed_ips.c.instance_id],
-               fixed_ips.c.instance_id != None)
+               fixed_ips.c.instance_id is not None)
 
     for row in s.execute():
         m = select([virtual_interfaces.c.id]).\
index 38f83fc019294d8a896488eefa97082448f7551e..b27a5c6ba9b5b5b91634f6e6edaad44d860ad685 100644 (file)
@@ -36,11 +36,11 @@ def upgrade(migrate_engine):
         itypes[instance_type.id] = instance_type.flavorid
 
     for instance_type_id in itypes.keys():
-        migrate_engine.execute(migrations.update()\
-                .where(migrations.c.old_flavor_id == itypes[instance_type_id])\
+        migrate_engine.execute(migrations.update()
+                .where(migrations.c.old_flavor_id == itypes[instance_type_id])
                 .values(old_instance_type_id=instance_type_id))
-        migrate_engine.execute(migrations.update()\
-                .where(migrations.c.new_flavor_id == itypes[instance_type_id])\
+        migrate_engine.execute(migrations.update()
+                .where(migrations.c.new_flavor_id == itypes[instance_type_id])
                 .values(new_instance_type_id=instance_type_id))
 
     migrations.c.old_flavor_id.drop()
@@ -66,13 +66,13 @@ def downgrade(migrate_engine):
         itypes[instance_type.flavorid] = instance_type.id
 
     for instance_type_flavorid in itypes.keys():
-        migrate_engine.execute(migrations.update()\
+        migrate_engine.execute(migrations.update()
                 .where(migrations.c.old_instance_type_id ==
-                    itypes[instance_type_flavorid])\
+                    itypes[instance_type_flavorid])
                 .values(old_flavor_id=instance_type_flavorid))
-        migrate_engine.execute(migrations.update()\
+        migrate_engine.execute(migrations.update()
                 .where(migrations.c.new_instance_type_id ==
-                    itypes[instance_type_flavorid])\
+                    itypes[instance_type_flavorid])
                 .values(new_flavor_id=instance_type_flavorid))
 
     migrations.c.old_instance_type_id.drop()
index fbd1c45702cf2014a936ab5c50d4269030e557c7..4367be04ab8c7c09293151095c5033ae3a185188 100644 (file)
@@ -31,8 +31,8 @@ def upgrade(migrate_engine):
     rows = migrate_engine.execute(virtual_interfaces.select())
     for row in rows:
         vif_uuid = str(utils.gen_uuid())
-        migrate_engine.execute(virtual_interfaces.update()\
-                .where(virtual_interfaces.c.id == row[0])\
+        migrate_engine.execute(virtual_interfaces.update()
+                .where(virtual_interfaces.c.id == row[0])
                 .values(uuid=vif_uuid))
 
 
index 7125911d34d934593a3d24418c5f25ea70b1fdeb..57d0447120dec078c6d6a89d91b2f88ace9da7b2 100644 (file)
@@ -31,8 +31,8 @@ def upgrade(migrate_engine):
     rows = migrate_engine.execute(networks.select())
     for row in rows:
         networks_uuid = str(utils.gen_uuid())
-        migrate_engine.execute(networks.update()\
-                .where(networks.c.id == row[0])\
+        migrate_engine.execute(networks.update()
+                .where(networks.c.id == row[0])
                 .values(uuid=networks_uuid))
 
 
index 3d26204f06913d5075161e912d0e4835322cca53..28234df90a4944328cb8c10dc1d0a015d77ddecb 100644 (file)
@@ -51,11 +51,11 @@ def upgrade(migrate_engine):
     bw_usage_cache.create_column(mac_column)
 
     bw_usage_cache.update()\
-        .values(mac=select([vifs.c.address])\
+        .values(mac=select([vifs.c.address])
             .where(and_(
                     networks.c.label == bw_usage_cache.c.network_label,
                     networks.c.id == vifs.c.network_id,
-                    bw_usage_cache.c.instance_id == vifs.c.instance_id))\
+                    bw_usage_cache.c.instance_id == vifs.c.instance_id))
             .as_scalar()).execute()
 
     bw_usage_cache.c.network_label.drop()
@@ -87,11 +87,11 @@ def downgrade(migrate_engine):
     bw_usage_cache.create_column(network_label_column)
 
     bw_usage_cache.update()\
-        .values(network_label=select([network.c.label])\
+        .values(network_label=select([network.c.label])
             .where(and_(
                 network.c.id == vifs.c.network_id,
                vifs.c.address == bw_usage_cache.c.mac,
-               bw_usage_cache.c.instance_id == vifs.c.instance_id))\
+               bw_usage_cache.c.instance_id == vifs.c.instance_id))
             .as_scalar()).execute()
 
     bw_usage_cache.c.mac.drop()
index c6687ac8074bb7e3bb3b1446271d4ec0ebc83987..59c1c8a4d2df99c4124e7113c7aa8d59bf2449a3 100644 (file)
@@ -64,6 +64,6 @@ def downgrade(migrate_engine):
 
     for row in migrate_engine.execute(bw_usage_cache.select()):
         instance_id = cache[row['mac']]
-        migrate_engine.execute(bw_usage_cache.update()\
-                    .where(bw_usage_cache.c.id == row['id'])\
+        migrate_engine.execute(bw_usage_cache.update()
+                    .where(bw_usage_cache.c.id == row['id'])
                     .values(instance_id=instance_id))
index 11bc25b015834b34e784cda6f9febbce62791f25..675834a9d302cabf838cb7591beea0f97396cf1f 100644 (file)
@@ -81,7 +81,7 @@ def upgrade(migrate_engine):
 
     if migrate_engine.name == "mysql":
         migrate_engine.execute("ALTER TABLE volume_id_mappings Engine=InnoDB")
-        migrate_engine.execute("ALTER TABLE snapshot_id_mappings "\
+        migrate_engine.execute("ALTER TABLE snapshot_id_mappings "
                 "Engine=InnoDB")
 
     volumes = Table('volumes', meta, autoload=True)
index 7c8b874ccee45074e15d3c8d71df86771919fb4a..545157bd64415a647b7436d0b5887a29319118da 100755 (executable)
@@ -60,7 +60,7 @@ def import_normalize(line):
            split_line[2] == "import" and split_line[3] != "*" and
            split_line[1] != "__future__" and
            (len(split_line) == 4 or
-           (len(split_line) == 6  and split_line[4] == "as"))):
+           (len(split_line) == 6 and split_line[4] == "as"))):
         return "import %s.%s" % (split_line[1], split_line[3])
     else:
         return line
@@ -212,8 +212,8 @@ def cinder_import_alphabetical(physical_line, line_number, lines):
             ).strip().lower().split()
     # with or without "as y"
     length = [2, 4]
-    if (len(split_line) in length  and len(split_previous) in length and
-        split_line[0] == "import" and  split_previous[0] == "import"):
+    if (len(split_line) in length and len(split_previous) in length and
+        split_line[0] == "import" and split_previous[0] == "import"):
         if split_line[1] < split_previous[1]:
             return (0,
                     "CINDER N306: imports not in alphabetical order (%s, %s)"
@@ -244,7 +244,7 @@ def cinder_docstring_one_line(physical_line):
     """
     pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE])  # start
     end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE])  # end
-    if (pos != -1  and end and len(physical_line) > pos + 4):
+    if (pos != -1 and end and len(physical_line) > pos + 4):
         if (physical_line[-5] != '.'):
             return pos, "CINDER N402: one line docstring needs a period"
 
index 809db018d0d046d8bc61ebea5817b1e62f141c10..51ef2ff632c6961a3a3a28127d3fe6ecfd2998eb 100644 (file)
@@ -6,6 +6,6 @@ mox==0.5.3
 nose
 nosexcover
 openstack.nose_plugin
-pep8>=1.0
+pep8==1.1
 sphinx>=1.1.2
 MySQL-python