From: Salvatore Orlando
Date: Thu, 12 Sep 2013 22:23:46 +0000 (-0700)
Subject: Fix failure in multi-chunk state sync for nicira plugin
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=de1db2023597dd3555da33bd86582627c85d4856;p=openstack-build%2Fneutron-build.git

Fix failure in multi-chunk state sync for nicira plugin

Bug 1224719

This patch ensures the 'result_count' attribute from NVP API responses
is properly handled, and adds support for the '_page_cursor' request
query parameter in the fake NVP API client in order to ensure issues
like this are caught by unit tests.

Change-Id: I1d6179bd58a14d19729fc882f004b6d1afccbe3d
---

diff --git a/neutron/plugins/nicira/common/sync.py b/neutron/plugins/nicira/common/sync.py
index 449be2957..9a1c422cf 100644
--- a/neutron/plugins/nicira/common/sync.py
+++ b/neutron/plugins/nicira/common/sync.py
@@ -457,24 +457,22 @@ class NvpSynchronizer():
             # API. In this case the request should be split in multiple
             # requests. This is not ideal, and therefore a log warning will
             # be emitted.
-            requests = range(0, page_size / (nvplib.MAX_PAGE_SIZE + 1) + 1)
-            if len(requests) > 1:
+            num_requests = page_size / (nvplib.MAX_PAGE_SIZE + 1) + 1
+            if num_requests > 1:
                 LOG.warn(_("Requested page size is %(cur_chunk_size)d."
                            "It might be necessary to do %(num_requests)d "
                            "round-trips to NVP for fetching data. Please "
                            "tune sync parameters to ensure chunk size "
                            "is less than %(max_page_size)d"),
                          {'cur_chunk_size': page_size,
-                          'num_requests': len(requests),
+                          'num_requests': num_requests,
                           'max_page_size': nvplib.MAX_PAGE_SIZE})
-            results = []
-            actual_size = 0
-            for _req in requests:
-                req_results, cursor, req_size = nvplib.get_single_query_page(
-                    uri, self._cluster, cursor,
-                    min(page_size, nvplib.MAX_PAGE_SIZE))
-                results.extend(req_results)
-                actual_size = actual_size + req_size
+            # Only the first request might return the total size,
+            # subsequent requests will definitely not
+            results, cursor, total_size = nvplib.get_single_query_page(
+                uri, self._cluster, cursor,
+                min(page_size, nvplib.MAX_PAGE_SIZE))
+            for _req in range(0, num_requests - 1):
                 # If no cursor is returned break the cycle as there is no
                 # actual need to perform multiple requests (all fetched)
                 # This happens when the overall size of resources exceeds
@@ -482,9 +480,13 @@ class NvpSynchronizer():
                 # resource type is below this threshold
                 if not cursor:
                     break
+                req_results, cursor = nvplib.get_single_query_page(
+                    uri, self._cluster, cursor,
+                    min(page_size, nvplib.MAX_PAGE_SIZE))[:2]
+                results.extend(req_results)
             # reset cursor before returning if we queried just to
             # know the number of entities
-            return results, cursor if page_size else 'start', actual_size
+            return results, cursor if page_size else 'start', total_size
         return [], cursor, None
 
     def _fetch_nvp_data_chunk(self, sp):
diff --git a/neutron/tests/unit/nicira/fake_nvpapiclient.py b/neutron/tests/unit/nicira/fake_nvpapiclient.py
index e8b6f5b41..b1eb31bfd 100644
--- a/neutron/tests/unit/nicira/fake_nvpapiclient.py
+++ b/neutron/tests/unit/nicira/fake_nvpapiclient.py
@@ -135,7 +135,7 @@ class FakeClient:
 
     def _get_filters(self, querystring):
         if not querystring:
-            return (None, None, None)
+            return (None, None, None, None)
         params = urlparse.parse_qs(querystring)
         tag_filter = None
         attr_filter = None
@@ -144,15 +144,15 @@
                           'tag': params['tag'][0]}
         elif 'uuid' in params:
             attr_filter = {'uuid': params['uuid'][0]}
-        # Handle page_length
-        # TODO(salv-orlando): Handle page cursor too
+        # Handle page length and page cursor parameters
         page_len = params.get('_page_length')
+        page_cursor = params.get('_page_cursor')
         if page_len:
             page_len = int(page_len[0])
         else:
             # Explicitly set it to None (avoid 0 or empty list)
             page_len = None
-        return (tag_filter, attr_filter, page_len)
+        return (tag_filter, attr_filter, page_len, page_cursor)
 
     def _add_lswitch(self, body):
         fake_lswitch = json.loads(body)
@@ -369,7 +369,11 @@
 
     def _list(self, resource_type, response_file,
               parent_uuid=None, query=None, relations=None):
-        (tag_filter, attr_filter, page_len) = self._get_filters(query)
+        (tag_filter, attr_filter,
+         page_len, page_cursor) = self._get_filters(query)
+        # result_count attribute in response should appear only when
+        # page_cursor is not specified
+        do_result_count = not page_cursor
         with open("%s/%s" % (self.fake_files_path, response_file)) as f:
             response_template = f.read()
             res_dict = getattr(self, '_fake_%s_dict' % resource_type)
@@ -410,6 +414,15 @@
                     return True
             return False
 
+        def _cursor_match(res_uuid, page_cursor):
+            if not page_cursor:
+                return True
+            if page_cursor == res_uuid:
+                # always return True once page_cursor has been found
+                page_cursor = None
+                return True
+            return False
+
         def _build_item(resource):
             item = json.loads(response_template % resource)
             if relations:
@@ -437,7 +450,8 @@
                  for res_uuid in res_dict
                  if (parent_func(res_uuid) and
                      _tag_match(res_uuid) and
-                     _attr_match(res_uuid))]
+                     _attr_match(res_uuid) and
+                     _cursor_match(res_uuid, page_cursor))]
         # Rather inefficient, but hey this is just a mock!
         next_cursor = None
         total_items = len(items)
@@ -447,10 +461,11 @@
             except IndexError:
                 next_cursor = None
             items = items[:page_len]
-        response_dict = {'results': items,
-                         'result_count': total_items}
+        response_dict = {'results': items}
         if next_cursor:
             response_dict['page_cursor'] = next_cursor
+        if do_result_count:
+            response_dict['result_count'] = total_items
         return json.dumps(response_dict)
 
     def _show(self, resource_type, response_file,
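
For readers skimming the patch, the chunked-fetch behaviour it relies on can be
summarised by the minimal standalone sketch below. This is illustrative only and
not code from the patch: fetch_chunk, fake_page and DATA are hypothetical
stand-ins for NvpSynchronizer._fetch_data, nvplib.get_single_query_page() and
the NVP backend, under the assumption (stated in the commit message) that only
the first, cursor-less response reports the total result count.

    DATA = list(range(20))  # pretend the NVP backend holds 20 resources


    def fake_page(cursor, page_size):
        # Hypothetical stand-in for a single NVP query: returns one slice
        # of DATA, a cursor for the next slice, and the total result count
        # only on the first (cursor-less) request, mimicking the API.
        start = int(cursor) if cursor else 0
        chunk = DATA[start:start + page_size]
        nxt = str(start + page_size) if start + page_size < len(DATA) else None
        total = len(DATA) if cursor is None else None
        return chunk, nxt, total


    def fetch_chunk(fetch_page, page_size, max_page_size=5000):
        # A chunk larger than max_page_size must be split over several
        # round-trips; only the first response is trusted for the total.
        num_requests = page_size // (max_page_size + 1) + 1
        results, cursor, total_size = fetch_page(
            None, min(page_size, max_page_size))
        for _ in range(num_requests - 1):
            if not cursor:
                break  # everything already fetched, no more round-trips
            page, cursor = fetch_page(cursor, min(page_size, max_page_size))[:2]
            results.extend(page)
        return results, cursor, total_size


    results, cursor, total = fetch_chunk(fake_page, page_size=10, max_page_size=4)
    print(len(results), cursor, total)   # -> 12 12 20

Splitting page_size=10 over max_page_size=4 forces three round-trips, yet the
total of 20 still comes only from the first response; that is the combination
the reworked _fetch_data loop handles and the fake client's new '_page_cursor'
support lets the unit tests reproduce.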