review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Remove unused iscsi_targets table
authorGorka Eguileor <geguileo@redhat.com>
Fri, 15 Jan 2016 16:17:44 +0000 (17:17 +0100)
committerGorka Eguileor <geguileo@redhat.com>
Fri, 15 Jan 2016 20:14:35 +0000 (21:14 +0100)
iscsi_targets table, ORM class IscsiTarget, and db methods are not
currently being used, so we can drop them.

Closes-Bug: #1534808
Change-Id: I5bd0013f8fbcce18c844a1b051c3d385176f7e5b

cinder/db/api.py
cinder/db/sqlalchemy/api.py
cinder/db/sqlalchemy/migrate_repo/README
cinder/db/sqlalchemy/migrate_repo/versions/063_drop_iscsi_targets_table.py [new file with mode: 0644]
cinder/db/sqlalchemy/models.py
cinder/tests/unit/test_db_api.py
cinder/tests/unit/test_volume.py

index 0be4ec83a1ed132d2c5e5c59d1d1af2e76b0e268..2859f99ee66579ac5cf7d627cf3bee9d4cb6ae3c 100644 (file)
@@ -135,25 +135,6 @@ def service_update(context, service_id, values):
     return IMPL.service_update(context, service_id, values)
 
 
-###################
-
-
-def iscsi_target_count_by_host(context, host):
-    """Return count of export devices."""
-    return IMPL.iscsi_target_count_by_host(context, host)
-
-
-def iscsi_target_create_safe(context, values):
-    """Create an iscsi_target from the values dictionary.
-
-    The device is not returned. If the create violates the unique
-    constraints because the iscsi_target and host already exist,
-    no exception is raised.
-
-    """
-    return IMPL.iscsi_target_create_safe(context, values)
-
-
 ###############
 
 
@@ -235,11 +216,6 @@ def volume_get_all_by_project(context, project_id, marker, limit,
                                           offset=offset)
 
 
-def volume_get_iscsi_target_num(context, volume_id):
-    """Get the target num (tid) allocated to the volume."""
-    return IMPL.volume_get_iscsi_target_num(context, volume_id)
-
-
 def volume_update(context, volume_id, values):
     """Set the given properties on a volume and update it.
 
index a5fda055d98f89c4adcc5692ce18a34f8b50479a..f859bb7159ce9643a93353172d85d76176491715 100644 (file)
@@ -491,33 +491,6 @@ def _dict_with_extra_specs_if_authorized(context, inst_type_query):
 ###################
 
 
-@require_admin_context
-def iscsi_target_count_by_host(context, host):
-    return model_query(context, models.IscsiTarget).\
-        filter_by(host=host).\
-        count()
-
-
-@require_admin_context
-def iscsi_target_create_safe(context, values):
-    iscsi_target_ref = models.IscsiTarget()
-
-    for (key, value) in values.items():
-        iscsi_target_ref[key] = value
-    session = get_session()
-
-    try:
-        with session.begin():
-            session.add(iscsi_target_ref)
-            return iscsi_target_ref
-    except db_exc.DBDuplicateEntry:
-        LOG.debug("Can not add duplicate IscsiTarget.")
-        return None
-
-
-###################
-
-
 @require_context
 def _quota_get(context, project_id, resource, session=None):
     result = model_query(context, models.Quota, session=session,
@@ -1254,9 +1227,6 @@ def volume_destroy(context, volume_id):
                     'deleted_at': now,
                     'updated_at': literal_column('updated_at'),
                     'migration_status': None})
-        model_query(context, models.IscsiTarget, session=session).\
-            filter_by(volume_id=volume_id).\
-            update({'volume_id': None})
         model_query(context, models.VolumeMetadata, session=session).\
             filter_by(volume_id=volume_id).\
             update({'deleted': True,
@@ -1759,18 +1729,6 @@ def process_sort_params(sort_keys, sort_dirs, default_keys=None,
     return result_keys, result_dirs
 
 
-@require_admin_context
-def volume_get_iscsi_target_num(context, volume_id):
-    result = model_query(context, models.IscsiTarget, read_deleted="yes").\
-        filter_by(volume_id=volume_id).\
-        first()
-
-    if not result:
-        raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)
-
-    return result.target_num
-
-
 @require_context
 def volume_update(context, volume_id, values):
     session = get_session()
index 6218f8cac424e81c946faaccf08ab6c93db0fe2a..2f81df17aea90906fdd02cff5b46fbe0c06a415c 100644 (file)
@@ -1,4 +1,7 @@
 This is a database migration repository.
 
-More information at
-http://code.google.com/p/sqlalchemy-migrate/
+More information at:
+    https://github.com/openstack/sqlalchemy-migrate
+
+Original project is no longer maintained at:
+    http://code.google.com/p/sqlalchemy-migrate/
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/063_drop_iscsi_targets_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/063_drop_iscsi_targets_table.py
new file mode 100644 (file)
index 0000000..f882794
--- /dev/null
@@ -0,0 +1,20 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from sqlalchemy import MetaData, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    table = Table('iscsi_targets', meta, autoload=True)
+    table.drop()
index 04768738754ac86ef946a3d05baca9e2fe1c781d..fb7d949c6444bdf0dc4a13e85db53db767bd068b 100644 (file)
@@ -498,22 +498,6 @@ class SnapshotMetadata(BASE, CinderBase):
                             'SnapshotMetadata.deleted == False)')
 
 
-class IscsiTarget(BASE, CinderBase):
-    """Represents an iscsi target for a given host."""
-    __tablename__ = 'iscsi_targets'
-    __table_args__ = (schema.UniqueConstraint("target_num", "host"),
-                      {'mysql_engine': 'InnoDB'})
-    id = Column(Integer, primary_key=True)
-    target_num = Column(Integer)
-    host = Column(String(255))
-    volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True)
-    volume = relationship(Volume,
-                          backref=backref('iscsi_target', uselist=False),
-                          foreign_keys=volume_id,
-                          primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
-                          'IscsiTarget.deleted==False)')
-
-
 class Backup(BASE, CinderBase):
     """Represents a backup of a volume to Swift."""
     __tablename__ = 'backups'
index c9eb9a24c96b8af28343e482fff06eaccfd2dda1..058d895b0573189d80e5a72239625ea58a00fa1b 100644 (file)
@@ -921,15 +921,6 @@ class DBAPIVolumeTestCase(BaseTest):
             self.assertRaises(exception.InvalidInput, db.volume_get_all,
                               self.ctxt, None, None, sort_keys=keys)
 
-    def test_volume_get_iscsi_target_num(self):
-        db.iscsi_target_create_safe(self.ctxt, {'volume_id': 42,
-                                                'target_num': 43})
-        self.assertEqual(43, db.volume_get_iscsi_target_num(self.ctxt, 42))
-
-    def test_volume_get_iscsi_target_num_nonexistent(self):
-        self.assertRaises(exception.ISCSITargetNotFoundForVolume,
-                          db.volume_get_iscsi_target_num, self.ctxt, 42)
-
     def test_volume_update(self):
         volume = db.volume_create(self.ctxt, {'host': 'h1'})
         ref_a = db.volume_update(self.ctxt, volume['id'],
@@ -1868,35 +1859,6 @@ class DBAPIQuotaTestCase(BaseTest):
                          self.ctxt, 'p1'))
 
 
-class DBAPIIscsiTargetTestCase(BaseTest):
-
-    """Unit tests for cinder.db.api.iscsi_target_*."""
-
-    def _get_base_values(self):
-        return {'target_num': 10, 'host': 'fake_host'}
-
-    def test_iscsi_target_create_safe(self):
-        target = db.iscsi_target_create_safe(self.ctxt,
-                                             self._get_base_values())
-        self.assertTrue(target['id'])
-        self.assertEqual('fake_host', target['host'])
-        self.assertEqual(10, target['target_num'])
-
-    def test_iscsi_target_count_by_host(self):
-        for i in range(3):
-            values = self._get_base_values()
-            values['target_num'] += i
-            db.iscsi_target_create_safe(self.ctxt, values)
-        self.assertEqual(3,
-                         db.iscsi_target_count_by_host(self.ctxt, 'fake_host'))
-
-    def test_integrity_error(self):
-        values = self._get_base_values()
-        values['id'] = 1
-        db.iscsi_target_create_safe(self.ctxt, values)
-        self.assertFalse(db.iscsi_target_create_safe(self.ctxt, values))
-
-
 class DBAPIBackupTestCase(BaseTest):
 
     """Tests for db.api.backup_* methods."""
index 44dea0bd47df466657798b50ec2fec4a70ee3bf8..4ba4610f2072290ce56383476cd82e3264d8ac11 100644 (file)
@@ -2956,27 +2956,6 @@ class VolumeTestCase(BaseVolumeTestCase):
         db_volume = db.volume_get(self.context, volume.id)
         self.assertEqual('available', db_volume.status)
 
-    def test_concurrent_volumes_get_different_targets(self):
-        """Ensure multiple concurrent volumes get different targets."""
-        volume_ids = []
-        targets = []
-
-        def _check(volume_id):
-            """Make sure targets aren't duplicated."""
-            volume_ids.append(volume_id)
-            admin_context = context.get_admin_context()
-            iscsi_target = db.volume_get_iscsi_target_num(admin_context,
-                                                          volume_id)
-            self.assertNotIn(iscsi_target, targets)
-            targets.append(iscsi_target)
-
-        # FIXME(jdg): What is this actually testing?
-        # We never call the internal _check method?
-        for _index in range(100):
-            tests_utils.create_volume(self.context, **self.volume_params)
-        for volume_id in volume_ids:
-            self.volume.delete_volume(self.context, volume_id)
-
     def test_multi_node(self):
         # TODO(termie): Figure out how to test with two nodes,
         # each of them having a different FLAG for storage_node