_warn_on_bytestring=False),
nullable=True)
instances.create_column(instances_os_type)
- migrate_engine.execute(instances.update()\
- .where(instances.c.os_type == None)\
+ migrate_engine.execute(instances.update()
+ .where(instances.c.os_type == None)
.values(os_type='linux'))
type_names[row[0]] = row[1]
for type_id, type_name in type_names.iteritems():
- migrate_engine.execute(instances.update()\
- .where(instances.c.instance_type == type_name)\
+ migrate_engine.execute(instances.update()
+ .where(instances.c.instance_type == type_name)
.values(instance_type_id=type_id))
instances.c.instance_type.drop()
type_names[row[0]] = row[1]
for type_id, type_name in type_names.iteritems():
- migrate_engine.execute(instances.update()\
- .where(instances.c.instance_type_id == type_id)\
+ migrate_engine.execute(instances.update()
+ .where(instances.c.instance_type_id == type_id)
.values(instance_type=type_name))
instances.c.instance_type_id.drop()
def assert_old_quotas_have_no_active_duplicates(migrate_engine, quotas):
"""Ensure that there are no duplicate non-deleted quota entries."""
- select = quotas.select().where(quotas.c.deleted == False)
+ select = quotas.select().where(quotas.c.deleted == False)
results = migrate_engine.execute(select)
_assert_no_duplicate_project_ids(list(results))
"""Ensure that there are no duplicate non-deleted quota entries."""
for resource in resources:
select = quotas.select().\
- where(quotas.c.deleted == False).\
+ where(quotas.c.deleted == False).\
where(quotas.c.resource == resource)
results = migrate_engine.execute(select)
_assert_no_duplicate_project_ids(list(results))
rows = migrate_engine.execute(instances.select())
for row in rows:
instance_uuid = str(utils.gen_uuid())
- migrate_engine.execute(instances.update()\
- .where(instances.c.id == row[0])\
+ migrate_engine.execute(instances.update()
+ .where(instances.c.id == row[0])
.values(uuid=instance_uuid))
# populate the fixed_ips virtual_interface_id column
s = select([fixed_ips.c.id, fixed_ips.c.instance_id],
- fixed_ips.c.instance_id != None)
+ fixed_ips.c.instance_id != None)
for row in s.execute():
m = select([virtual_interfaces.c.id]).\
itypes[instance_type.id] = instance_type.flavorid
for instance_type_id in itypes.keys():
- migrate_engine.execute(migrations.update()\
- .where(migrations.c.old_flavor_id == itypes[instance_type_id])\
+ migrate_engine.execute(migrations.update()
+ .where(migrations.c.old_flavor_id == itypes[instance_type_id])
.values(old_instance_type_id=instance_type_id))
- migrate_engine.execute(migrations.update()\
- .where(migrations.c.new_flavor_id == itypes[instance_type_id])\
+ migrate_engine.execute(migrations.update()
+ .where(migrations.c.new_flavor_id == itypes[instance_type_id])
.values(new_instance_type_id=instance_type_id))
migrations.c.old_flavor_id.drop()
itypes[instance_type.flavorid] = instance_type.id
for instance_type_flavorid in itypes.keys():
- migrate_engine.execute(migrations.update()\
+ migrate_engine.execute(migrations.update()
.where(migrations.c.old_instance_type_id ==
- itypes[instance_type_flavorid])\
+ itypes[instance_type_flavorid])
.values(old_flavor_id=instance_type_flavorid))
- migrate_engine.execute(migrations.update()\
+ migrate_engine.execute(migrations.update()
.where(migrations.c.new_instance_type_id ==
- itypes[instance_type_flavorid])\
+ itypes[instance_type_flavorid])
.values(new_flavor_id=instance_type_flavorid))
migrations.c.old_instance_type_id.drop()
rows = migrate_engine.execute(virtual_interfaces.select())
for row in rows:
vif_uuid = str(utils.gen_uuid())
- migrate_engine.execute(virtual_interfaces.update()\
- .where(virtual_interfaces.c.id == row[0])\
+ migrate_engine.execute(virtual_interfaces.update()
+ .where(virtual_interfaces.c.id == row[0])
.values(uuid=vif_uuid))
rows = migrate_engine.execute(networks.select())
for row in rows:
networks_uuid = str(utils.gen_uuid())
- migrate_engine.execute(networks.update()\
- .where(networks.c.id == row[0])\
+ migrate_engine.execute(networks.update()
+ .where(networks.c.id == row[0])
.values(uuid=networks_uuid))
bw_usage_cache.create_column(mac_column)
bw_usage_cache.update()\
- .values(mac=select([vifs.c.address])\
+ .values(mac=select([vifs.c.address])
.where(and_(
networks.c.label == bw_usage_cache.c.network_label,
networks.c.id == vifs.c.network_id,
- bw_usage_cache.c.instance_id == vifs.c.instance_id))\
+ bw_usage_cache.c.instance_id == vifs.c.instance_id))
.as_scalar()).execute()
bw_usage_cache.c.network_label.drop()
bw_usage_cache.create_column(network_label_column)
bw_usage_cache.update()\
- .values(network_label=select([network.c.label])\
+ .values(network_label=select([network.c.label])
.where(and_(
network.c.id == vifs.c.network_id,
vifs.c.address == bw_usage_cache.c.mac,
- bw_usage_cache.c.instance_id == vifs.c.instance_id))\
+ bw_usage_cache.c.instance_id == vifs.c.instance_id))
.as_scalar()).execute()
bw_usage_cache.c.mac.drop()
for row in migrate_engine.execute(bw_usage_cache.select()):
instance_id = cache[row['mac']]
- migrate_engine.execute(bw_usage_cache.update()\
- .where(bw_usage_cache.c.id == row['id'])\
+ migrate_engine.execute(bw_usage_cache.update()
+ .where(bw_usage_cache.c.id == row['id'])
.values(instance_id=instance_id))
if migrate_engine.name == "mysql":
migrate_engine.execute("ALTER TABLE volume_id_mappings Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE snapshot_id_mappings "\
+ migrate_engine.execute("ALTER TABLE snapshot_id_mappings "
"Engine=InnoDB")
volumes = Table('volumes', meta, autoload=True)
split_line[2] == "import" and split_line[3] != "*" and
split_line[1] != "__future__" and
(len(split_line) == 4 or
- (len(split_line) == 6 and split_line[4] == "as"))):
+ (len(split_line) == 6 and split_line[4] == "as"))):
return "import %s.%s" % (split_line[1], split_line[3])
else:
return line
).strip().lower().split()
# with or without "as y"
length = [2, 4]
- if (len(split_line) in length and len(split_previous) in length and
- split_line[0] == "import" and split_previous[0] == "import"):
+ if (len(split_line) in length and len(split_previous) in length and
+ split_line[0] == "import" and split_previous[0] == "import"):
if split_line[1] < split_previous[1]:
return (0,
"CINDER N306: imports not in alphabetical order (%s, %s)"
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
- if (pos != -1 and end and len(physical_line) > pos + 4):
+ if (pos != -1 and end and len(physical_line) > pos + 4):
if (physical_line[-5] != '.'):
return pos, "CINDER N402: one line docstring needs a period"
nose
nosexcover
openstack.nose_plugin
-pep8>=1.0
+pep8==1.1
sphinx>=1.1.2
MySQL-python