Skip to content

Commit

Permalink
Using skipped results to modify query offset.
Browse files Browse the repository at this point in the history
  • Loading branch information
dhermes committed May 17, 2016
1 parent 826fe30 commit 3c0b3e1
Show file tree
Hide file tree
Showing 2 changed files with 17 additions and 11 deletions.
8 changes: 5 additions & 3 deletions gcloud/datastore/query.py
Original file line number Diff line number Diff line change
Expand Up @@ -390,6 +390,7 @@ def __init__(self, query, client, limit=None, offset=None,
self._start_cursor = start_cursor
self._end_cursor = end_cursor
self._page = self._more_results = None
self._skipped_results = None

def next_page(self):
"""Fetch a single "page" of query results.
Expand Down Expand Up @@ -432,7 +433,8 @@ def next_page(self):
# results. See
# https://github.com/GoogleCloudPlatform/gcloud-python/issues/280
# for discussion.
entity_pbs, cursor_as_bytes, more_results_enum = query_results[:3]
(entity_pbs, cursor_as_bytes,
more_results_enum, self._skipped_results) = query_results

if cursor_as_bytes == b'':
self._start_cursor = None
Expand Down Expand Up @@ -466,10 +468,10 @@ def __iter__(self):
num_results = len(self._page)
if self._limit is not None:
self._limit -= num_results
if self._offset is not None:
if self._offset is not None and self._skipped_results is not None:
# NOTE: The offset goes down relative to the location
# because we are updating the cursor each time.
self._offset -= num_results
self._offset -= self._skipped_results
self.next_page()


Expand Down
20 changes: 12 additions & 8 deletions gcloud/datastore/test_query.py
Original file line number Diff line number Diff line change
Expand Up @@ -345,7 +345,8 @@ def _getTargetClass(self):
def _makeOne(self, *args, **kw):
    """Instantiate the class under test, forwarding all arguments."""
    klass = self._getTargetClass()
    return klass(*args, **kw)

def _addQueryResults(self, connection, cursor=_END, more=False):
def _addQueryResults(self, connection, cursor=_END, more=False,
skipped_results=None):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore._generated import query_pb2
from gcloud.datastore.helpers import _new_value_pb
Expand All @@ -361,7 +362,7 @@ def _addQueryResults(self, connection, cursor=_END, more=False):
value_pb = _new_value_pb(entity_pb, 'foo')
value_pb.string_value = u'Foo'
connection._results.append(
([entity_pb], cursor, MORE if more else NO_MORE))
([entity_pb], cursor, MORE if more else NO_MORE, skipped_results))

def _makeClient(self, connection=None):
if connection is None:
Expand Down Expand Up @@ -476,8 +477,8 @@ def test_next_page_w_cursors_w_bogus_more(self):
client = self._makeClient(connection)
query = _Query(client, self._KIND, self._PROJECT, self._NAMESPACE)
self._addQueryResults(connection, cursor=self._END, more=True)
epb, cursor, _ = connection._results.pop()
connection._results.append((epb, cursor, 4)) # invalid enum
epb, cursor, _, _ = connection._results.pop()
connection._results.append((epb, cursor, 4, None)) # invalid enum
iterator = self._makeOne(query, client)
self.assertRaises(ValueError, iterator.next_page)

Expand Down Expand Up @@ -547,9 +548,12 @@ def test___iter___w_limit(self):
connection = _Connection()
client = self._makeClient(connection)
query = _Query(client, self._KIND, self._PROJECT, self._NAMESPACE)
self._addQueryResults(connection, more=True)
skipped_results = 4
self._addQueryResults(connection, more=True,
skipped_results=skipped_results)
self._addQueryResults(connection)
iterator = self._makeOne(query, client, limit=2, offset=13)
offset = 13
iterator = self._makeOne(query, client, limit=2, offset=offset)
entities = list(iterator)

self.assertFalse(iterator._more_results)
Expand All @@ -561,11 +565,11 @@ def test___iter___w_limit(self):
self.assertEqual(entities[1]['foo'], u'Foo')
qpb1 = _pb_from_query(query)
qpb1.limit.value = 2
qpb1.offset = 13
qpb1.offset = offset
qpb2 = _pb_from_query(query)
qpb2.start_cursor = self._END
qpb2.limit.value = 1
qpb2.offset = 12
qpb2.offset = offset - skipped_results
EXPECTED1 = {
'project': self._PROJECT,
'query_pb': qpb1,
Expand Down

0 comments on commit 3c0b3e1

Please sign in to comment.