diff --git a/.travis.yml b/.travis.yml
index a22d5553b58c..b74e2f4e7e1d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,7 +8,8 @@ script:
   - tox -e py26
   - tox -e py27
   - tox -e lint
+  - tox -e regression
 after_success:
   - tox -e cover
   - coveralls
-  - scripts/merge.sh
+  - scripts/update_docs.sh
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 1988f04feb2b..1ad10bebc3d2 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -3,7 +3,7 @@ Contributing

 #. **Please sign one of the contributor license agreements below.**
 #. Fork the repo, develop and test your code changes, add docs.
-#. Make sure that your commit messages clearly describe the changes.
+#. Make sure that your commit messages clearly describe the changes.
 #. Send a pull request.

 Here are some guidelines for hacking on gcloud-python.
@@ -16,7 +16,7 @@ using a Git checkout:

 - While logged into your GitHub account, navigate to the gcloud-python repo on
   GitHub.
-
+
   https://github.com/GoogleCloudPlatform/gcloud-python

 - Fork and clone the gcloud-python repository to your GitHub account by
@@ -130,6 +130,70 @@ Running Tests
     $ cd ~/hack-on-gcloud/
     $ /usr/bin/tox

+Running Regression Tests
+------------------------
+
+- To run the regression tests, you can execute::
+
+    $ tox -e regression
+
+  or run only the regression tests for a particular package via::
+
+    $ python regression/run_regression.py --package {package}
+
+  This alone will not run the tests: you'll also need to set some local
+  auth settings and make some configuration changes in your project
+  before all the tests can run.
+
+- Regression tests run against an actual project, so you'll need to
+  provide some environment variables to facilitate authentication to
+  your project:
+
+  - ``GCLOUD_TESTS_DATASET_ID``: The name of the dataset your tests connect to.
+  - ``GCLOUD_TESTS_CLIENT_EMAIL``: The email for the service account you're
+    authenticating with.
+  - ``GCLOUD_TESTS_KEY_FILE``: The path to an encrypted key file.
+    See the private key
+    `docs `__
+    for an explanation of how to get a private key.
+
+- Examples of these can be found in ``regression/local_test_setup.sample``. We
+  recommend copying this to ``regression/local_test_setup``, editing the
+  values, and sourcing them into your environment::
+
+    $ source regression/local_test_setup
+
+- The ``GCLOUD_TESTS_KEY_FILE`` value should point to a valid path (relative or
+  absolute) on your system where the key file for your service account can
+  be found.
+
+- For datastore tests, you'll need to create composite
+  `indexes `__
+  with the ``gcloud`` command line
+  `tool `__::
+
+   # Install the app (App Engine Command Line Interface) component.
+   $ gcloud components update app
+
+   # See https://cloud.google.com/sdk/crypto for details on PyOpenSSL and
+   # http://stackoverflow.com/a/25067729/1068170 for why we must persist.
+   $ export CLOUDSDK_PYTHON_SITEPACKAGES=1
+
+   # Authenticate the gcloud tool with your account.
+   $ gcloud auth activate-service-account $GCLOUD_TESTS_CLIENT_EMAIL \
+   >   --key-file=$GCLOUD_TESTS_KEY_FILE
+
+   # Create the indexes.
+   $ gcloud preview datastore create-indexes regression/data/ \
+   >   --project=$GCLOUD_TESTS_DATASET_ID
+
+   # Restore your environment to its previous state.
+   $ unset CLOUDSDK_PYTHON_SITEPACKAGES
+
+- For datastore query tests, you'll need stored data in your dataset.
+ To populate this data, run:: + + $ python regression/populate_datastore.py Test Coverage ------------- @@ -184,4 +248,4 @@ Before we can accept your pull requests you'll need to sign a Contributor Licens - **If you are an individual writing original source code** and **you own the intellectual property**, then you'll need to sign an `individual CLA `__. - **If you work for a company that wants to allow you to contribute your work**, then you'll need to sign a `corporate CLA `__. -You can sign these electronically (just scroll to the bottom). After that, we'll be able to accept your pull requests. \ No newline at end of file +You can sign these electronically (just scroll to the bottom). After that, we'll be able to accept your pull requests. diff --git a/regression/data/index.yaml b/regression/data/index.yaml new file mode 100644 index 000000000000..5a2d2b1a8bc9 --- /dev/null +++ b/regression/data/index.yaml @@ -0,0 +1,23 @@ +indexes: + +- kind: Character + ancestor: yes + properties: + - name: appearances + +- kind: Character + ancestor: yes + properties: + - name: alive + +- kind: Character + ancestor: yes + properties: + - name: family + - name: appearances + +- kind: Character + ancestor: yes + properties: + - name: name + - name: family diff --git a/regression/datastore.py b/regression/datastore.py index 047b6f586dfe..c1a5d212680a 100644 --- a/regression/datastore.py +++ b/regression/datastore.py @@ -5,37 +5,31 @@ from gcloud import datastore # This assumes the command is being run via tox hence the # repository root is the current directory. +from regression import populate_datastore from regression import regression_utils class TestDatastore(unittest2.TestCase): - def setUp(self): - environ = regression_utils.get_environ() - self._dataset_id = environ['dataset_id'] - self._client_email = environ['client_email'] - self._key_filename = environ['key_filename'] - self._datasets = {} + @classmethod + def setUpClass(cls): + cls.dataset = regression_utils.get_dataset() - self.entities_to_delete = [] + def setUp(self): + self.case_entities_to_delete = [] def tearDown(self): - for entity in self.entities_to_delete: - entity.delete() + with self.dataset.transaction(): + for entity in self.case_entities_to_delete: + entity.delete() + - def _get_dataset(self): - if self._dataset_id not in self._datasets: - self._datasets[self._dataset_id] = datastore.get_dataset( - self._dataset_id, self._client_email, self._key_filename) - return self._datasets[self._dataset_id] +class TestDatastoreSave(TestDatastore): def _get_post(self, name=None, key_id=None, post_content=None): post_content = post_content or { 'title': 'How to make the perfect pizza in your grill', 'tags': ['pizza', 'grill'], - # NOTE: We don't support datetime.date, but should. - # NOTE: Without a tz, assertEqual fails with - # "can't compare offset-naive and offset-aware datetimes" 'publishedAt': datetime.datetime(2001, 1, 1, tzinfo=pytz.utc), 'author': 'Silvano', 'isDraft': False, @@ -43,8 +37,7 @@ def _get_post(self, name=None, key_id=None, post_content=None): 'rating': 5.0, } # Create an entity with the given content in our dataset. - dataset = self._get_dataset() - entity = dataset.entity(kind='Post') + entity = self.dataset.entity(kind='Post') entity.update(post_content) # Update the entity key. @@ -63,16 +56,18 @@ def _generic_test_post(self, name=None, key_id=None): entity.save() # Register entity to be deleted. 
-        self.entities_to_delete.append(entity)
+        self.case_entities_to_delete.append(entity)

         if name is not None:
             self.assertEqual(entity.key().name(), name)
         if key_id is not None:
             self.assertEqual(entity.key().id(), key_id)
-        retrieved_entity = self._get_dataset().get_entity(entity.key())
+        retrieved_entity = self.dataset.get_entity(entity.key())
         # Check the keys are the same.
-        self.assertEqual(retrieved_entity.key().path(),
-                         entity.key().path())
+        self.assertEqual(retrieved_entity.key().path(), entity.key().path())
+        self.assertEqual(retrieved_entity.key().namespace(),
+                         entity.key().namespace())
+
         # Check the data is the same.
         retrieved_dict = dict(retrieved_entity.items())
         entity_dict = dict(entity.items())
@@ -88,12 +83,11 @@ def test_post_with_generated_id(self):
         self._generic_test_post()

     def test_save_multiple(self):
-        dataset = self._get_dataset()
-        with dataset.transaction():
+        with self.dataset.transaction():
             entity1 = self._get_post()
             entity1.save()
             # Register entity to be deleted.
-            self.entities_to_delete.append(entity1)
+            self.case_entities_to_delete.append(entity1)

             second_post_content = {
                 'title': 'How to make the perfect homemade pasta',
@@ -107,12 +101,188 @@ def test_save_multiple(self):
             entity2 = self._get_post(post_content=second_post_content)
             entity2.save()
             # Register entity to be deleted.
-            self.entities_to_delete.append(entity2)
+            self.case_entities_to_delete.append(entity2)

         keys = [entity1.key(), entity2.key()]
-        matches = dataset.get_entities(keys)
+        matches = self.dataset.get_entities(keys)
         self.assertEqual(len(matches), 2)

     def test_empty_kind(self):
-        posts = self._get_dataset().query().kind('Post').limit(2).fetch()
+        posts = self.dataset.query('Post').limit(2).fetch()
         self.assertEqual(posts, [])
+
+
+class TestDatastoreQuery(TestDatastore):
+
+    @classmethod
+    def setUpClass(cls):
+        super(TestDatastoreQuery, cls).setUpClass()
+        cls.CHARACTERS = populate_datastore.CHARACTERS
+        cls.ANCESTOR_KEY = datastore.key.Key(
+            path=[populate_datastore.ANCESTOR])
+
+    def _base_query(self):
+        return self.dataset.query('Character').ancestor(self.ANCESTOR_KEY)
+
+    def test_limit_queries(self):
+        limit = 5
+        query = self._base_query().limit(limit)
+        # Verify there is no cursor before fetch().
+        self.assertRaises(RuntimeError, query.cursor)
+
+        # Fetch characters.
+        character_entities = query.fetch()
+        self.assertEqual(len(character_entities), limit)
+
+        # Check cursor after fetch.
+        cursor = query.cursor()
+        self.assertTrue(cursor is not None)
+
+        # Fetch next batch of characters.
+        new_query = self._base_query().with_cursor(cursor)
+        new_character_entities = new_query.fetch()
+        characters_remaining = len(self.CHARACTERS) - limit
+        self.assertEqual(len(new_character_entities), characters_remaining)
+
+    def test_query_simple_filter(self):
+        query = self._base_query().filter('appearances >=', 20)
+        expected_matches = 6
+        # We expect 6, but allow the query to get 1 extra.
+        entities = query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+    def test_query_multiple_filters(self):
+        query = self._base_query().filter(
+            'appearances >=', 26).filter('family =', 'Stark')
+        expected_matches = 4
+        # We expect 4, but allow the query to get 1 extra.
+        entities = query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+    def test_ancestor_query(self):
+        filtered_query = self._base_query()
+
+        expected_matches = 8
+        # We expect 8, but allow the query to get 1 extra.
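+        # (Fetching one extra means the assertEqual below would also
+        # catch a query that matched too many entities, not just too few.)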
+        entities = filtered_query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+    def test_query___key___filter(self):
+        rickard_key = datastore.key.Key(
+            path=[populate_datastore.ANCESTOR, populate_datastore.RICKARD])
+
+        query = self._base_query().filter('__key__ =', rickard_key)
+        expected_matches = 1
+        # We expect 1, but allow the query to get 1 extra.
+        entities = query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+    def test_ordered_query(self):
+        query = self._base_query().order('appearances')
+        expected_matches = 8
+        # We expect 8, but allow the query to get 1 extra.
+        entities = query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+        # Actually check the ordered data returned.
+        self.assertEqual(entities[0]['name'], self.CHARACTERS[0]['name'])
+        self.assertEqual(entities[7]['name'], self.CHARACTERS[3]['name'])
+
+    def test_projection_query(self):
+        filtered_query = self._base_query().projection(['name', 'family'])
+
+        # NOTE: There are 9 responses because of Catelyn. She has both
+        #       Stark and Tully as her families, hence occurs twice in
+        #       the results.
+        expected_matches = 9
+        # We expect 9, but allow the query to get 1 extra.
+        entities = filtered_query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+        arya_entity = entities[0]
+        arya_dict = dict(arya_entity.items())
+        self.assertEqual(arya_dict, {'name': 'Arya', 'family': 'Stark'})
+
+        catelyn_stark_entity = entities[2]
+        catelyn_stark_dict = dict(catelyn_stark_entity.items())
+        self.assertEqual(catelyn_stark_dict,
+                         {'name': 'Catelyn', 'family': 'Stark'})
+
+        catelyn_tully_entity = entities[3]
+        catelyn_tully_dict = dict(catelyn_tully_entity.items())
+        self.assertEqual(catelyn_tully_dict,
+                         {'name': 'Catelyn', 'family': 'Tully'})
+
+        # Check both Catelyn keys are the same.
+        catelyn_stark_key = catelyn_stark_entity.key()
+        catelyn_tully_key = catelyn_tully_entity.key()
+        self.assertEqual(catelyn_stark_key.path(), catelyn_tully_key.path())
+        self.assertEqual(catelyn_stark_key.namespace(),
+                         catelyn_tully_key.namespace())
+        # Also check the _dataset_id since both were retrieved from datastore.
+        self.assertEqual(catelyn_stark_key._dataset_id,
+                         catelyn_tully_key._dataset_id)
+
+        sansa_entity = entities[8]
+        sansa_dict = dict(sansa_entity.items())
+        self.assertEqual(sansa_dict, {'name': 'Sansa', 'family': 'Stark'})
+
+    def test_query_paginate_with_offset(self):
+        query = self._base_query()
+        offset = 2
+        limit = 3
+        page_query = query.offset(offset).limit(limit).order('appearances')
+        # Make sure no cursor is set before fetch().
+        self.assertRaises(RuntimeError, page_query.cursor)
+
+        # Fetch characters.
+        entities = page_query.fetch()
+        self.assertEqual(len(entities), limit)
+        self.assertEqual(entities[0]['name'], 'Robb')
+        self.assertEqual(entities[1]['name'], 'Bran')
+        self.assertEqual(entities[2]['name'], 'Catelyn')
+
+        # Use cursor to begin next query.
+        cursor = page_query.cursor()
+        next_query = page_query.with_cursor(cursor).offset(0)
+        self.assertEqual(next_query.limit(), limit)
+        # Fetch next set of characters.
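+        # (The limit survives with_cursor(), as asserted above; only
+        # the offset had to be reset to 0.)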
+        entities = next_query.fetch()
+        self.assertEqual(len(entities), limit)
+        self.assertEqual(entities[0]['name'], 'Sansa')
+        self.assertEqual(entities[1]['name'], 'Jon Snow')
+        self.assertEqual(entities[2]['name'], 'Arya')
+
+    def test_query_paginate_with_start_cursor(self):
+        query = self._base_query()
+        offset = 2
+        limit = 2
+        page_query = query.offset(offset).limit(limit).order('appearances')
+        # Make sure no cursor is set before fetch().
+        self.assertRaises(RuntimeError, page_query.cursor)
+
+        # Fetch characters.
+        entities = page_query.fetch()
+        self.assertEqual(len(entities), limit)
+
+        # Use cursor to create a fresh query.
+        cursor = page_query.cursor()
+        fresh_query = self._base_query()
+        fresh_query = fresh_query.order('appearances').with_cursor(cursor)
+
+        new_entities = fresh_query.fetch()
+        characters_remaining = len(self.CHARACTERS) - limit - offset
+        self.assertEqual(len(new_entities), characters_remaining)
+        self.assertEqual(new_entities[0]['name'], 'Catelyn')
+        self.assertEqual(new_entities[3]['name'], 'Arya')
+
+    def test_query_group_by(self):
+        query = self._base_query().group_by(['alive'])
+
+        expected_matches = 2
+        # We expect 2, but allow the query to get 1 extra.
+        entities = query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+        self.assertEqual(entities[0]['name'], 'Catelyn')
+        self.assertEqual(entities[1]['name'], 'Arya')
diff --git a/regression/populate_datastore.py b/regression/populate_datastore.py
new file mode 100644
index 000000000000..d4ffb37a5f69
--- /dev/null
+++ b/regression/populate_datastore.py
@@ -0,0 +1,90 @@
+"""Script to populate datastore with regression test data."""
+
+
+from gcloud import datastore
+# This assumes the command is being run via tox hence the
+# repository root is the current directory.
+from regression import regression_utils + + +ANCESTOR = {'kind': 'Book', 'name': 'GoT'} +RICKARD = {'kind': 'Character', 'name': 'Rickard'} +EDDARD = {'kind': 'Character', 'name': 'Eddard'} +KEY_PATHS = [ + [ANCESTOR, RICKARD], + [ANCESTOR, RICKARD, EDDARD], + [ANCESTOR, + {'kind': 'Character', 'name': 'Catelyn'}], + [ANCESTOR, RICKARD, EDDARD, + {'kind': 'Character', 'name': 'Arya'}], + [ANCESTOR, RICKARD, EDDARD, + {'kind': 'Character', 'name': 'Sansa'}], + [ANCESTOR, RICKARD, EDDARD, + {'kind': 'Character', 'name': 'Robb'}], + [ANCESTOR, RICKARD, EDDARD, + {'kind': 'Character', 'name': 'Bran'}], + [ANCESTOR, RICKARD, EDDARD, + {'kind': 'Character', 'name': 'Jon Snow'}], +] +CHARACTERS = [ + { + 'name': u'Rickard', + 'family': u'Stark', + 'appearances': 0, + 'alive': False, + }, { + 'name': u'Eddard', + 'family': u'Stark', + 'appearances': 9, + 'alive': False, + }, { + 'name': u'Catelyn', + 'family': [u'Stark', u'Tully'], + 'appearances': 26, + 'alive': False, + }, { + 'name': u'Arya', + 'family': u'Stark', + 'appearances': 33, + 'alive': True, + }, { + 'name': u'Sansa', + 'family': u'Stark', + 'appearances': 31, + 'alive': True, + }, { + 'name': u'Robb', + 'family': u'Stark', + 'appearances': 22, + 'alive': False, + }, { + 'name': u'Bran', + 'family': u'Stark', + 'appearances': 25, + 'alive': True, + }, { + 'name': u'Jon Snow', + 'family': u'Stark', + 'appearances': 32, + 'alive': True, + }, +] + + +def add_characters(): + dataset = regression_utils.get_dataset() + with dataset.transaction(): + for key_path, character in zip(KEY_PATHS, CHARACTERS): + if key_path[-1]['name'] != character['name']: + raise ValueError(('Character and key don\'t agree', + key_path, character)) + key = datastore.key.Key(path=key_path) + entity = datastore.entity.Entity(dataset=dataset).key(key) + entity.update(character) + entity.save() + print 'Adding Character %s %s' % (character['name'], + character['family']) + + +if __name__ == '__main__': + add_characters() diff --git a/regression/regression_utils.py b/regression/regression_utils.py index 2a8c39ddaf5c..5b24ef6997cf 100644 --- a/regression/regression_utils.py +++ b/regression/regression_utils.py @@ -1,11 +1,14 @@ import os import sys +from gcloud import datastore + # Defaults from shell environ. May be None. DATASET_ID = os.getenv('GCLOUD_TESTS_DATASET_ID') CLIENT_EMAIL = os.getenv('GCLOUD_TESTS_CLIENT_EMAIL') KEY_FILENAME = os.getenv('GCLOUD_TESTS_KEY_FILE') +DATASETS = {} ENVIRON_ERROR_MSG = """\ To run the regression tests, you need to set some environment variables. @@ -23,3 +26,13 @@ def get_environ(): 'client_email': CLIENT_EMAIL, 'key_filename': KEY_FILENAME, } + + +def get_dataset(): + environ = get_environ() + get_dataset_args = (environ['dataset_id'], environ['client_email'], + environ['key_filename']) + if get_dataset_args not in DATASETS: + # Cache return value for the environment. + DATASETS[get_dataset_args] = datastore.get_dataset(*get_dataset_args) + return DATASETS[get_dataset_args] diff --git a/regression/run_regression.py b/regression/run_regression.py index ad7bf691778e..e83cacc27184 100644 --- a/regression/run_regression.py +++ b/regression/run_regression.py @@ -1,6 +1,11 @@ import argparse +import sys import unittest2 +# This assumes the command is being run via tox hence the +# repository root is the current directory. 
+from regression import regression_utils
+

 def get_parser():
     parser = argparse.ArgumentParser(
@@ -15,13 +20,17 @@ def run_module_tests(module_name):
     suite = unittest2.TestSuite()
     tests = unittest2.defaultTestLoader.loadTestsFromName(module_name)
     suite.addTest(tests)
-    unittest2.TextTestRunner(verbosity=2).run(suite)
+    return unittest2.TextTestRunner(verbosity=2).run(suite)


 def main():
     parser = get_parser()
     args = parser.parse_args()
-    run_module_tests(args.package)
+    # Make sure environ is set before running tests.
+    regression_utils.get_environ()
+    test_result = run_module_tests(args.package)
+    if not test_result.wasSuccessful():
+        sys.exit(1)


 if __name__ == '__main__':
diff --git a/scripts/merge.sh b/scripts/merge.sh
deleted file mode 100755
index cf57c0269813..000000000000
--- a/scripts/merge.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-# Copyright 2014 Google Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -ev
-
-# if merging to master and not a pull request, update docs
-if [ "${TRAVIS_BRANCH}" == "master" ] && [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then
-  # generate new set of json files in docs/json/master
-  tox -e docs
-  git submodule add -b gh-pages https://${GH_OAUTH_TOKEN}@github.com/${GH_OWNER}/${GH_PROJECT_NAME} ghpages
-  cp -R docs/_build/html/* ghpages/
-  cd ghpages
-  git add .
-  # commit to gh-pages branch to apply changes
-  git config user.name "selfiebot"
-  git commit -m "Update docs after merge to master"
-  git push https://${GH_OAUTH_TOKEN}@github.com/${GH_OWNER}/${GH_PROJECT_NAME} HEAD:gh-pages
-fi
diff --git a/scripts/run_regression.sh b/scripts/run_regression.sh
new file mode 100755
index 000000000000..d70cce4c43f5
--- /dev/null
+++ b/scripts/run_regression.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+set -ev
+
+# If we're on Travis, we need to set up the environment.
+if [[ "${TRAVIS}" == "true" ]]; then
+  # If merging to master and not a pull request, run regression tests.
+  if [ "${TRAVIS_BRANCH}" == "master" ] && [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then
+    echo "Running in Travis during merge, decrypting stored key file."
+
+    # Convert encrypted key file into decrypted file to be used.
+    openssl aes-256-cbc -K $encrypted_a1b222e8c14d_key \
+        -iv $encrypted_a1b222e8c14d_iv \
+        -in regression/key.p12.enc \
+        -out $GCLOUD_TESTS_KEY_FILE -d
+  else
+    echo "Running in Travis during non-merge to master, doing nothing."
+    exit
+  fi
+fi
+
+# Run the regression tests for each tested package.
+python regression/run_regression.py --package datastore
diff --git a/scripts/update_docs.sh b/scripts/update_docs.sh
new file mode 100755
index 000000000000..67c0bc276ec8
--- /dev/null
+++ b/scripts/update_docs.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -ev
+
+# If merging to master and not a pull request, update docs.
+if [ "${TRAVIS_BRANCH}" == "master" ] && [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then
+  # Generate new set of json files in docs/json/master.
+ tox -e docs + git submodule add -b gh-pages https://${GH_OAUTH_TOKEN}@github.com/${GH_OWNER}/${GH_PROJECT_NAME} ghpages + cp -R docs/_build/html/* ghpages/ + cd ghpages + git add . + # Commit to gh-pages branch to apply changes. + git config user.name "selfiebot" + git commit -m "Update docs after merge to master." + git push https://${GH_OAUTH_TOKEN}@github.com/${GH_OWNER}/${GH_PROJECT_NAME} HEAD:gh-pages +fi diff --git a/tox.ini b/tox.ini index 85b567ec2ca9..768d3ca1aa71 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] envlist = - py26,py27,cover,docs,lint + py26,py27,cover,docs,lint,regression [testenv:py26] commands = @@ -33,7 +33,7 @@ deps = basepython = python2.7 commands = - python -c "import shutil; shutil.rmtree('docs/_build')" + python -c "import shutil; shutil.rmtree('docs/_build', ignore_errors=True)" sphinx-build -W -b html -d docs/_build/doctrees docs docs/_build/html deps = Sphinx @@ -57,6 +57,6 @@ deps = basepython = python2.7 commands = - python regression/run_regression.py --package datastore + {toxinidir}/scripts/run_regression.sh deps = unittest2
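
For reference, the pieces above compose end to end as follows. This is a
minimal sketch, assuming the ``GCLOUD_TESTS_*`` variables are exported and
``regression/populate_datastore.py`` has already been run against your
dataset; it mirrors ``TestDatastoreQuery.test_limit_queries``::

    from gcloud import datastore
    from regression import populate_datastore
    from regression import regression_utils

    # get_dataset() caches one dataset per (dataset_id, email, key file)
    # tuple, so repeated calls reuse the same settings.
    dataset = regression_utils.get_dataset()

    # Page through the seeded 'Character' entities with a cursor.
    ancestor_key = datastore.key.Key(path=[populate_datastore.ANCESTOR])
    page_query = dataset.query('Character').ancestor(
        ancestor_key).order('appearances').limit(5)
    first_page = page_query.fetch()

    # cursor() only becomes available after a fetch(); with_cursor()
    # resumes the query where the first page left off.
    cursor = page_query.cursor()
    remainder = page_query.with_cursor(cursor).fetch()

    print 'Fetched %d + %d characters' % (len(first_page), len(remainder))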