diff --git a/firestore/google/cloud/firestore_v1/query.py b/firestore/google/cloud/firestore_v1/query.py
index 4aa3d2f22804..6f4c498c0725 100644
--- a/firestore/google/cloud/firestore_v1/query.py
+++ b/firestore/google/cloud/firestore_v1/query.py
@@ -666,7 +666,12 @@ def _normalize_cursor(self, cursor, orders):
             data = document_fields
             for order_key in order_keys:
                 try:
-                    values.append(field_path_module.get_nested_value(order_key, data))
+                    if order_key in data:
+                        values.append(data[order_key])
+                    else:
+                        values.append(
+                            field_path_module.get_nested_value(order_key, data)
+                        )
                 except KeyError:
                     msg = _MISSING_ORDER_BY.format(order_key, data)
                     raise ValueError(msg)
diff --git a/firestore/tests/system/test_system.py b/firestore/tests/system/test_system.py
index 40c1e2875223..f2d30c94a171 100644
--- a/firestore/tests/system/test_system.py
+++ b/firestore/tests/system/test_system.py
@@ -611,6 +611,43 @@ def test_query_stream(client, cleanup):
         assert value["b"] == 2
 
 
+def test_query_with_order_dot_key(client, cleanup):
+    db = client
+    collection_id = "collek" + unique_resource_id("-")
+    collection = db.collection(collection_id)
+    for index in range(100, -1, -1):
+        doc = collection.document("test_{:09d}".format(index))
+        data = {"count": 10 * index, "wordcount": {"page1": index * 10 + 100}}
+        doc.set(data)
+        cleanup(doc.delete)
+    query = collection.order_by("wordcount.page1").limit(3)
+    data = [doc.to_dict()["wordcount"]["page1"] for doc in query.stream()]
+    assert [100, 110, 120] == data
+    for snapshot in collection.order_by("wordcount.page1").limit(3).stream():
+        last_value = snapshot.get("wordcount.page1")
+    cursor_with_nested_keys = {"wordcount": {"page1": last_value}}
+    found = list(
+        collection.order_by("wordcount.page1")
+        .start_after(cursor_with_nested_keys)
+        .limit(3)
+        .stream()
+    )
+    found_data = [
+        {u"count": 30, u"wordcount": {u"page1": 130}},
+        {u"count": 40, u"wordcount": {u"page1": 140}},
+        {u"count": 50, u"wordcount": {u"page1": 150}},
+    ]
+    assert found_data == [snap.to_dict() for snap in found]
+    cursor_with_dotted_paths = {"wordcount.page1": last_value}
+    cursor_with_key_data = list(
+        collection.order_by("wordcount.page1")
+        .start_after(cursor_with_dotted_paths)
+        .limit(3)
+        .stream()
+    )
+    assert found_data == [snap.to_dict() for snap in cursor_with_key_data]
+
+
 def test_query_unary(client, cleanup):
     collection_name = "unary" + UNIQUE_RESOURCE_ID
     collection = client.collection(collection_name)
diff --git a/firestore/tests/unit/v1/test_query.py b/firestore/tests/unit/v1/test_query.py
index a690ba0c7ab1..a4911fecb44f 100644
--- a/firestore/tests/unit/v1/test_query.py
+++ b/firestore/tests/unit/v1/test_query.py
@@ -808,6 +808,16 @@ def test__normalize_cursor_as_dict_hit(self):
 
         self.assertEqual(query._normalize_cursor(cursor, query._orders), ([1], True))
 
+    def test__normalize_cursor_as_dict_with_dot_key_hit(self):
+        cursor = ({"b.a": 1}, True)
+        query = self._make_one(mock.sentinel.parent).order_by("b.a", "ASCENDING")
+        self.assertEqual(query._normalize_cursor(cursor, query._orders), ([1], True))
+
+    def test__normalize_cursor_as_dict_with_inner_data_hit(self):
+        cursor = ({"b": {"a": 1}}, True)
+        query = self._make_one(mock.sentinel.parent).order_by("b.a", "ASCENDING")
+        self.assertEqual(query._normalize_cursor(cursor, query._orders), ([1], True))
+
     def test__normalize_cursor_as_snapshot_hit(self):
         values = {"b": 1}
         docref = self._make_docref("here", "doc_id")
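
Note on the behavior this patch enables: `_normalize_cursor` now checks whether an
order-by key (which may be a dotted field path such as "wordcount.page1") appears
verbatim as a flat key in a dict cursor before falling back to the nested lookup, so
either cursor shape resolves. A minimal usage sketch, not part of the patch itself;
the "books" collection name and field values are illustrative:

    from google.cloud import firestore

    client = firestore.Client()
    collection = client.collection("books")

    # Both cursor shapes now resolve the "wordcount.page1" order key:
    nested_cursor = {"wordcount": {"page1": 120}}  # nested mapping
    dotted_cursor = {"wordcount.page1": 120}       # dotted path as a flat key

    # Equivalent to .start_after(nested_cursor) after this change.
    query = (
        collection.order_by("wordcount.page1")
        .start_after(dotted_cursor)
        .limit(3)
    )
    for snapshot in query.stream():
        print(snapshot.to_dict())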