From d292298500833b21e9ea9676e69db8b912124811 Mon Sep 17 00:00:00 2001 From: Rebecca Taylor Date: Wed, 4 Sep 2019 14:19:33 -0700 Subject: [PATCH 001/323] Add generated code samples. (#9153) --- language/v1/language_classify_gcs.py | 85 +++++++++++++ language/v1/language_classify_text.py | 83 +++++++++++++ language/v1/language_entities_gcs.py | 105 ++++++++++++++++ language/v1/language_entities_text.py | 100 +++++++++++++++ language/v1/language_entity_sentiment_gcs.py | 109 ++++++++++++++++ language/v1/language_entity_sentiment_text.py | 106 ++++++++++++++++ language/v1/language_sentiment_gcs.py | 95 ++++++++++++++ language/v1/language_sentiment_text.py | 90 ++++++++++++++ language/v1/language_syntax_gcs.py | 117 ++++++++++++++++++ language/v1/language_syntax_text.py | 112 +++++++++++++++++ language/v1/test/analyzing_entities.test.yaml | 101 +++++++++++++++ .../test/analyzing_entity_sentiment.test.yaml | 63 ++++++++++ .../v1/test/analyzing_sentiment.test.yaml | 74 +++++++++++ language/v1/test/analyzing_syntax.test.yaml | 72 +++++++++++ .../v1/test/classifying_content.test.yaml | 51 ++++++++ language/v1/test/samples.manifest.yaml | 38 ++++++ 16 files changed, 1401 insertions(+) create mode 100644 language/v1/language_classify_gcs.py create mode 100644 language/v1/language_classify_text.py create mode 100644 language/v1/language_entities_gcs.py create mode 100644 language/v1/language_entities_text.py create mode 100644 language/v1/language_entity_sentiment_gcs.py create mode 100644 language/v1/language_entity_sentiment_text.py create mode 100644 language/v1/language_sentiment_gcs.py create mode 100644 language/v1/language_sentiment_text.py create mode 100644 language/v1/language_syntax_gcs.py create mode 100644 language/v1/language_syntax_text.py create mode 100644 language/v1/test/analyzing_entities.test.yaml create mode 100644 language/v1/test/analyzing_entity_sentiment.test.yaml create mode 100644 language/v1/test/analyzing_sentiment.test.yaml create mode 100644 language/v1/test/analyzing_syntax.test.yaml create mode 100644 language/v1/test/classifying_content.test.yaml create mode 100644 language/v1/test/samples.manifest.yaml diff --git a/language/v1/language_classify_gcs.py b/language/v1/language_classify_gcs.py new file mode 100644 index 000000000000..db5958011cfe --- /dev/null +++ b/language/v1/language_classify_gcs.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! 
This is a generated sample ("Request", "language_classify_gcs") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Classify Content (GCS) +# description: Classifying Content in text file stored in Cloud Storage +# usage: python3 samples/v1/language_classify_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/classify-entertainment.txt"] + +# [START language_classify_gcs] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_classify_text(gcs_content_uri): + """ + Classifying Content in text file stored in Cloud Storage + + Args: + gcs_content_uri Google Cloud Storage URI where the file content is located. + e.g. gs://[Your Bucket]/[Path to File] + The text file must include at least 20 words. + """ + + client = language_v1.LanguageServiceClient() + + # gcs_content_uri = 'gs://cloud-samples-data/language/classify-entertainment.txt' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + + response = client.classify_text(document) + # Loop through classified categories returned from the API + for category in response.categories: + # Get the name of the category representing the document. + # See the predefined taxonomy of categories: + # https://cloud.google.com/natural-language/docs/categories + print(u"Category name: {}".format(category.name)) + # Get the confidence. Number representing how certain the classifier + # is that this category represents the provided text. + print(u"Confidence: {}".format(category.confidence)) + + +# [END language_classify_gcs] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--gcs_content_uri", + type=str, + default="gs://cloud-samples-data/language/classify-entertainment.txt", + ) + args = parser.parse_args() + + sample_classify_text(args.gcs_content_uri) + + +if __name__ == "__main__": + main() diff --git a/language/v1/language_classify_text.py b/language/v1/language_classify_text.py new file mode 100644 index 000000000000..2ecfd70bd588 --- /dev/null +++ b/language/v1/language_classify_text.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! 
This is a generated sample ("Request", "language_classify_text") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Classify Content +# description: Classifying Content in a String +# usage: python3 samples/v1/language_classify_text.py [--text_content "That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows."] + +# [START language_classify_text] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_classify_text(text_content): + """ + Classifying Content in a String + + Args: + text_content The text content to analyze. Must include at least 20 words. + """ + + client = language_v1.LanguageServiceClient() + + # text_content = 'That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows.' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"content": text_content, "type": type_, "language": language} + + response = client.classify_text(document) + # Loop through classified categories returned from the API + for category in response.categories: + # Get the name of the category representing the document. + # See the predefined taxonomy of categories: + # https://cloud.google.com/natural-language/docs/categories + print(u"Category name: {}".format(category.name)) + # Get the confidence. Number representing how certain the classifier + # is that this category represents the provided text. + print(u"Confidence: {}".format(category.confidence)) + + +# [END language_classify_text] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--text_content", + type=str, + default="That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows.", + ) + args = parser.parse_args() + + sample_classify_text(args.text_content) + + +if __name__ == "__main__": + main() diff --git a/language/v1/language_entities_gcs.py b/language/v1/language_entities_gcs.py new file mode 100644 index 000000000000..edd3238ac88a --- /dev/null +++ b/language/v1/language_entities_gcs.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! 
This is a generated sample ("Request", "language_entities_gcs")
+
+# To install the latest published package dependency, execute the following:
+#     pip install google-cloud-language
+
+# sample-metadata
+#   title: Analyzing Entities (GCS)
+#   description: Analyzing Entities in text file stored in Cloud Storage
+#   usage: python3 samples/v1/language_entities_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/entity.txt"]
+
+# [START language_entities_gcs]
+from google.cloud import language_v1
+from google.cloud.language_v1 import enums
+
+
+def sample_analyze_entities(gcs_content_uri):
+    """
+    Analyzing Entities in text file stored in Cloud Storage
+
+    Args:
+      gcs_content_uri Google Cloud Storage URI where the file content is located.
+      e.g. gs://[Your Bucket]/[Path to File]
+    """
+
+    client = language_v1.LanguageServiceClient()
+
+    # gcs_content_uri = 'gs://cloud-samples-data/language/entity.txt'
+
+    # Available types: PLAIN_TEXT, HTML
+    type_ = enums.Document.Type.PLAIN_TEXT
+
+    # Optional. If not specified, the language is automatically detected.
+    # For list of supported languages:
+    # https://cloud.google.com/natural-language/docs/languages
+    language = "en"
+    document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
+
+    # Available values: NONE, UTF8, UTF16, UTF32
+    encoding_type = enums.EncodingType.UTF8
+
+    response = client.analyze_entities(document, encoding_type=encoding_type)
+    # Loop through entities returned from the API
+    for entity in response.entities:
+        print(u"Representative name for the entity: {}".format(entity.name))
+        # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
+        print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name))
+        # Get the salience score associated with the entity in the [0, 1.0] range
+        print(u"Salience score: {}".format(entity.salience))
+        # Loop over the metadata associated with entity. For many known entities,
+        # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid).
+        # Some entity types may have additional metadata, e.g. ADDRESS entities
+        # may have metadata for the address street_name, postal_code, et al.
+        for metadata_name, metadata_value in entity.metadata.items():
+            print(u"{}: {}".format(metadata_name, metadata_value))
+
+        # Loop over the mentions of this entity in the input document.
+        # The API currently supports proper noun mentions.
+        for mention in entity.mentions:
+            print(u"Mention text: {}".format(mention.text.content))
+            # Get the mention type, e.g. PROPER for proper noun
+            print(
+                u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name)
+            )
+
+    # Get the language of the text, which will be the same as
+    # the language specified in the request or, if not specified,
+    # the automatically-detected language.
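+    # (With the default entity.txt input above, "California is a state.",
+    # the tests added in this change expect this to print "Language of the text: en".)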
+    print(u"Language of the text: {}".format(response.language))
+
+
+# [END language_entities_gcs]
+
+
+def main():
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--gcs_content_uri",
+        type=str,
+        default="gs://cloud-samples-data/language/entity.txt",
+    )
+    args = parser.parse_args()
+
+    sample_analyze_entities(args.gcs_content_uri)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/language/v1/language_entities_text.py b/language/v1/language_entities_text.py
new file mode 100644
index 000000000000..2948f44d3500
--- /dev/null
+++ b/language/v1/language_entities_text.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# DO NOT EDIT! This is a generated sample ("Request", "language_entities_text")
+
+# To install the latest published package dependency, execute the following:
+#     pip install google-cloud-language
+
+# sample-metadata
+#   title: Analyzing Entities
+#   description: Analyzing Entities in a String
+#   usage: python3 samples/v1/language_entities_text.py [--text_content "California is a state."]
+
+# [START language_entities_text]
+from google.cloud import language_v1
+from google.cloud.language_v1 import enums
+
+
+def sample_analyze_entities(text_content):
+    """
+    Analyzing Entities in a String
+
+    Args:
+      text_content The text content to analyze
+    """
+
+    client = language_v1.LanguageServiceClient()
+
+    # text_content = 'California is a state.'
+
+    # Available types: PLAIN_TEXT, HTML
+    type_ = enums.Document.Type.PLAIN_TEXT
+
+    # Optional. If not specified, the language is automatically detected.
+    # For list of supported languages:
+    # https://cloud.google.com/natural-language/docs/languages
+    language = "en"
+    document = {"content": text_content, "type": type_, "language": language}
+
+    # Available values: NONE, UTF8, UTF16, UTF32
+    encoding_type = enums.EncodingType.UTF8
+
+    response = client.analyze_entities(document, encoding_type=encoding_type)
+    # Loop through entities returned from the API
+    for entity in response.entities:
+        print(u"Representative name for the entity: {}".format(entity.name))
+        # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
+        print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name))
+        # Get the salience score associated with the entity in the [0, 1.0] range
+        print(u"Salience score: {}".format(entity.salience))
+        # Loop over the metadata associated with entity. For many known entities,
+        # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid).
+        # Some entity types may have additional metadata, e.g. ADDRESS entities
+        # may have metadata for the address street_name, postal_code, et al.
+        for metadata_name, metadata_value in entity.metadata.items():
+            print(u"{}: {}".format(metadata_name, metadata_value))
+
+        # Loop over the mentions of this entity in the input document.
+        # The API currently supports proper noun mentions.
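+        # (In practice the response may also include common noun mentions;
+        # the tests added in this change check for both PROPER and COMMON types.)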
+        for mention in entity.mentions:
+            print(u"Mention text: {}".format(mention.text.content))
+            # Get the mention type, e.g. PROPER for proper noun
+            print(
+                u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name)
+            )
+
+    # Get the language of the text, which will be the same as
+    # the language specified in the request or, if not specified,
+    # the automatically-detected language.
+    print(u"Language of the text: {}".format(response.language))
+
+
+# [END language_entities_text]
+
+
+def main():
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--text_content", type=str, default="California is a state.")
+    args = parser.parse_args()
+
+    sample_analyze_entities(args.text_content)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/language/v1/language_entity_sentiment_gcs.py b/language/v1/language_entity_sentiment_gcs.py
new file mode 100644
index 000000000000..87fb74de789e
--- /dev/null
+++ b/language/v1/language_entity_sentiment_gcs.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# DO NOT EDIT! This is a generated sample ("Request", "language_entity_sentiment_gcs")
+
+# To install the latest published package dependency, execute the following:
+#     pip install google-cloud-language
+
+# sample-metadata
+#   title: Analyzing Entity Sentiment (GCS)
+#   description: Analyzing Entity Sentiment in text file stored in Cloud Storage
+#   usage: python3 samples/v1/language_entity_sentiment_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/entity-sentiment.txt"]

+
+# [START language_entity_sentiment_gcs]
+from google.cloud import language_v1
+from google.cloud.language_v1 import enums
+
+
+def sample_analyze_entity_sentiment(gcs_content_uri):
+    """
+    Analyzing Entity Sentiment in text file stored in Cloud Storage
+
+    Args:
+      gcs_content_uri Google Cloud Storage URI where the file content is located.
+      e.g. gs://[Your Bucket]/[Path to File]
+    """
+
+    client = language_v1.LanguageServiceClient()
+
+    # gcs_content_uri = 'gs://cloud-samples-data/language/entity-sentiment.txt'
+
+    # Available types: PLAIN_TEXT, HTML
+    type_ = enums.Document.Type.PLAIN_TEXT
+
+    # Optional. If not specified, the language is automatically detected.
+    # For list of supported languages:
+    # https://cloud.google.com/natural-language/docs/languages
+    language = "en"
+    document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
+
+    # Available values: NONE, UTF8, UTF16, UTF32
+    encoding_type = enums.EncodingType.UTF8
+
+    response = client.analyze_entity_sentiment(document, encoding_type=encoding_type)
+    # Loop through entities returned from the API
+    for entity in response.entities:
+        print(u"Representative name for the entity: {}".format(entity.name))
+        # Get entity type, e.g. 
PERSON, LOCATION, ADDRESS, NUMBER, et al + print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + # Get the salience score associated with the entity in the [0, 1.0] range + print(u"Salience score: {}".format(entity.salience)) + # Get the aggregate sentiment expressed for this entity in the provided document. + sentiment = entity.sentiment + print(u"Entity sentiment score: {}".format(sentiment.score)) + print(u"Entity sentiment magnitude: {}".format(sentiment.magnitude)) + # Loop over the metadata associated with entity. For many known entities, + # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid). + # Some entity types may have additional metadata, e.g. ADDRESS entities + # may have metadata for the address street_name, postal_code, et al. + for metadata_name, metadata_value in entity.metadata.items(): + print(u"{} = {}".format(metadata_name, metadata_value)) + + # Loop over the mentions of this entity in the input document. + # The API currently supports proper noun mentions. + for mention in entity.mentions: + print(u"Mention text: {}".format(mention.text.content)) + # Get the mention type, e.g. PROPER for proper noun + print( + u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) + ) + + # Get the language of the text, which will be the same as + # the language specified in the request or, if not specified, + # the automatically-detected language. + print(u"Language of the text: {}".format(response.language)) + + +# [END language_entity_sentiment_gcs] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--gcs_content_uri", + type=str, + default="gs://cloud-samples-data/language/entity-sentiment.txt", + ) + args = parser.parse_args() + + sample_analyze_entity_sentiment(args.gcs_content_uri) + + +if __name__ == "__main__": + main() diff --git a/language/v1/language_entity_sentiment_text.py b/language/v1/language_entity_sentiment_text.py new file mode 100644 index 000000000000..6f914980a2e6 --- /dev/null +++ b/language/v1/language_entity_sentiment_text.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! This is a generated sample ("Request", "language_entity_sentiment_text") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Analyzing Entity Sentiment +# description: Analyzing Entity Sentiment in a String +# usage: python3 samples/v1/language_entity_sentiment_text.py [--text_content "Grapes are good. Bananas are bad."] + +# [START language_entity_sentiment_text] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_analyze_entity_sentiment(text_content): + """ + Analyzing Entity Sentiment in a String + + Args: + text_content The text content to analyze + """ + + client = language_v1.LanguageServiceClient() + + # text_content = 'Grapes are good. 
Bananas are bad.'
+
+    # Available types: PLAIN_TEXT, HTML
+    type_ = enums.Document.Type.PLAIN_TEXT
+
+    # Optional. If not specified, the language is automatically detected.
+    # For list of supported languages:
+    # https://cloud.google.com/natural-language/docs/languages
+    language = "en"
+    document = {"content": text_content, "type": type_, "language": language}
+
+    # Available values: NONE, UTF8, UTF16, UTF32
+    encoding_type = enums.EncodingType.UTF8
+
+    response = client.analyze_entity_sentiment(document, encoding_type=encoding_type)
+    # Loop through entities returned from the API
+    for entity in response.entities:
+        print(u"Representative name for the entity: {}".format(entity.name))
+        # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
+        print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name))
+        # Get the salience score associated with the entity in the [0, 1.0] range
+        print(u"Salience score: {}".format(entity.salience))
+        # Get the aggregate sentiment expressed for this entity in the provided document.
+        sentiment = entity.sentiment
+        print(u"Entity sentiment score: {}".format(sentiment.score))
+        print(u"Entity sentiment magnitude: {}".format(sentiment.magnitude))
+        # Loop over the metadata associated with entity. For many known entities,
+        # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid).
+        # Some entity types may have additional metadata, e.g. ADDRESS entities
+        # may have metadata for the address street_name, postal_code, et al.
+        for metadata_name, metadata_value in entity.metadata.items():
+            print(u"{} = {}".format(metadata_name, metadata_value))
+
+        # Loop over the mentions of this entity in the input document.
+        # The API currently supports proper noun mentions.
+        for mention in entity.mentions:
+            print(u"Mention text: {}".format(mention.text.content))
+            # Get the mention type, e.g. PROPER for proper noun
+            print(
+                u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name)
+            )
+
+    # Get the language of the text, which will be the same as
+    # the language specified in the request or, if not specified,
+    # the automatically-detected language.
+    print(u"Language of the text: {}".format(response.language))
+
+
+# [END language_entity_sentiment_text]
+
+
+def main():
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--text_content", type=str, default="Grapes are good. Bananas are bad."
+    )
+    args = parser.parse_args()
+
+    sample_analyze_entity_sentiment(args.text_content)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/language/v1/language_sentiment_gcs.py b/language/v1/language_sentiment_gcs.py
new file mode 100644
index 000000000000..366009668ba3
--- /dev/null
+++ b/language/v1/language_sentiment_gcs.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# DO NOT EDIT! 
This is a generated sample ("Request", "language_sentiment_gcs") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Analyzing Sentiment (GCS) +# description: Analyzing Sentiment in text file stored in Cloud Storage +# usage: python3 samples/v1/language_sentiment_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/sentiment-positive.txt"] + +# [START language_sentiment_gcs] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_analyze_sentiment(gcs_content_uri): + """ + Analyzing Sentiment in text file stored in Cloud Storage + + Args: + gcs_content_uri Google Cloud Storage URI where the file content is located. + e.g. gs://[Your Bucket]/[Path to File] + """ + + client = language_v1.LanguageServiceClient() + + # gcs_content_uri = 'gs://cloud-samples-data/language/sentiment-positive.txt' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + + # Available values: NONE, UTF8, UTF16, UTF32 + encoding_type = enums.EncodingType.UTF8 + + response = client.analyze_sentiment(document, encoding_type=encoding_type) + # Get overall sentiment of the input document + print(u"Document sentiment score: {}".format(response.document_sentiment.score)) + print( + u"Document sentiment magnitude: {}".format( + response.document_sentiment.magnitude + ) + ) + # Get sentiment for all sentences in the document + for sentence in response.sentences: + print(u"Sentence text: {}".format(sentence.text.content)) + print(u"Sentence sentiment score: {}".format(sentence.sentiment.score)) + print(u"Sentence sentiment magnitude: {}".format(sentence.sentiment.magnitude)) + + # Get the language of the text, which will be the same as + # the language specified in the request or, if not specified, + # the automatically-detected language. + print(u"Language of the text: {}".format(response.language)) + + +# [END language_sentiment_gcs] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--gcs_content_uri", + type=str, + default="gs://cloud-samples-data/language/sentiment-positive.txt", + ) + args = parser.parse_args() + + sample_analyze_sentiment(args.gcs_content_uri) + + +if __name__ == "__main__": + main() diff --git a/language/v1/language_sentiment_text.py b/language/v1/language_sentiment_text.py new file mode 100644 index 000000000000..c1325678bf39 --- /dev/null +++ b/language/v1/language_sentiment_text.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! 
This is a generated sample ("Request", "language_sentiment_text") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Analyzing Sentiment +# description: Analyzing Sentiment in a String +# usage: python3 samples/v1/language_sentiment_text.py [--text_content "I am so happy and joyful."] + +# [START language_sentiment_text] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_analyze_sentiment(text_content): + """ + Analyzing Sentiment in a String + + Args: + text_content The text content to analyze + """ + + client = language_v1.LanguageServiceClient() + + # text_content = 'I am so happy and joyful.' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"content": text_content, "type": type_, "language": language} + + # Available values: NONE, UTF8, UTF16, UTF32 + encoding_type = enums.EncodingType.UTF8 + + response = client.analyze_sentiment(document, encoding_type=encoding_type) + # Get overall sentiment of the input document + print(u"Document sentiment score: {}".format(response.document_sentiment.score)) + print( + u"Document sentiment magnitude: {}".format( + response.document_sentiment.magnitude + ) + ) + # Get sentiment for all sentences in the document + for sentence in response.sentences: + print(u"Sentence text: {}".format(sentence.text.content)) + print(u"Sentence sentiment score: {}".format(sentence.sentiment.score)) + print(u"Sentence sentiment magnitude: {}".format(sentence.sentiment.magnitude)) + + # Get the language of the text, which will be the same as + # the language specified in the request or, if not specified, + # the automatically-detected language. + print(u"Language of the text: {}".format(response.language)) + + +# [END language_sentiment_text] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("--text_content", type=str, default="I am so happy and joyful.") + args = parser.parse_args() + + sample_analyze_sentiment(args.text_content) + + +if __name__ == "__main__": + main() diff --git a/language/v1/language_syntax_gcs.py b/language/v1/language_syntax_gcs.py new file mode 100644 index 000000000000..74d88787d426 --- /dev/null +++ b/language/v1/language_syntax_gcs.py @@ -0,0 +1,117 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! 
This is a generated sample ("Request", "language_syntax_gcs") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Analyzing Syntax (GCS) +# description: Analyzing Syntax in text file stored in Cloud Storage +# usage: python3 samples/v1/language_syntax_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/syntax-sentence.txt"] + +# [START language_syntax_gcs] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_analyze_syntax(gcs_content_uri): + """ + Analyzing Syntax in text file stored in Cloud Storage + + Args: + gcs_content_uri Google Cloud Storage URI where the file content is located. + e.g. gs://[Your Bucket]/[Path to File] + """ + + client = language_v1.LanguageServiceClient() + + # gcs_content_uri = 'gs://cloud-samples-data/language/syntax-sentence.txt' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + + # Available values: NONE, UTF8, UTF16, UTF32 + encoding_type = enums.EncodingType.UTF8 + + response = client.analyze_syntax(document, encoding_type=encoding_type) + # Loop through tokens returned from the API + for token in response.tokens: + # Get the text content of this token. Usually a word or punctuation. + text = token.text + print(u"Token text: {}".format(text.content)) + print( + u"Location of this token in overall document: {}".format(text.begin_offset) + ) + # Get the part of speech information for this token. + # Parts of spech are as defined in: + # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf + part_of_speech = token.part_of_speech + # Get the tag, e.g. NOUN, ADJ for Adjective, et al. + print( + u"Part of Speech tag: {}".format( + enums.PartOfSpeech.Tag(part_of_speech.tag).name + ) + ) + # Get the voice, e.g. ACTIVE or PASSIVE + print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name)) + # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. + print(u"Tense: {}".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name)) + # See API reference for additional Part of Speech information available + # Get the lemma of the token. Wikipedia lemma description + # https://en.wikipedia.org/wiki/Lemma_(morphology) + print(u"Lemma: {}".format(token.lemma)) + # Get the dependency tree parse information for this token. + # For more information on dependency labels: + # http://www.aclweb.org/anthology/P13-2017 + dependency_edge = token.dependency_edge + print(u"Head token index: {}".format(dependency_edge.head_token_index)) + print( + u"Label: {}".format(enums.DependencyEdge.Label(dependency_edge.label).name) + ) + + # Get the language of the text, which will be the same as + # the language specified in the request or, if not specified, + # the automatically-detected language. 
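+    # (The begin_offset values printed above are computed using the UTF8
+    # encoding_type requested earlier in this sample.)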
+ print(u"Language of the text: {}".format(response.language)) + + +# [END language_syntax_gcs] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--gcs_content_uri", + type=str, + default="gs://cloud-samples-data/language/syntax-sentence.txt", + ) + args = parser.parse_args() + + sample_analyze_syntax(args.gcs_content_uri) + + +if __name__ == "__main__": + main() diff --git a/language/v1/language_syntax_text.py b/language/v1/language_syntax_text.py new file mode 100644 index 000000000000..4b11d4d04b8f --- /dev/null +++ b/language/v1/language_syntax_text.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! This is a generated sample ("Request", "language_syntax_text") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Analyzing Syntax +# description: Analyzing Syntax in a String +# usage: python3 samples/v1/language_syntax_text.py [--text_content "This is a short sentence."] + +# [START language_syntax_text] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_analyze_syntax(text_content): + """ + Analyzing Syntax in a String + + Args: + text_content The text content to analyze + """ + + client = language_v1.LanguageServiceClient() + + # text_content = 'This is a short sentence.' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"content": text_content, "type": type_, "language": language} + + # Available values: NONE, UTF8, UTF16, UTF32 + encoding_type = enums.EncodingType.UTF8 + + response = client.analyze_syntax(document, encoding_type=encoding_type) + # Loop through tokens returned from the API + for token in response.tokens: + # Get the text content of this token. Usually a word or punctuation. + text = token.text + print(u"Token text: {}".format(text.content)) + print( + u"Location of this token in overall document: {}".format(text.begin_offset) + ) + # Get the part of speech information for this token. + # Parts of spech are as defined in: + # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf + part_of_speech = token.part_of_speech + # Get the tag, e.g. NOUN, ADJ for Adjective, et al. + print( + u"Part of Speech tag: {}".format( + enums.PartOfSpeech.Tag(part_of_speech.tag).name + ) + ) + # Get the voice, e.g. ACTIVE or PASSIVE + print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name)) + # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. + print(u"Tense: {}".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name)) + # See API reference for additional Part of Speech information available + # Get the lemma of the token. 
Wikipedia lemma description + # https://en.wikipedia.org/wiki/Lemma_(morphology) + print(u"Lemma: {}".format(token.lemma)) + # Get the dependency tree parse information for this token. + # For more information on dependency labels: + # http://www.aclweb.org/anthology/P13-2017 + dependency_edge = token.dependency_edge + print(u"Head token index: {}".format(dependency_edge.head_token_index)) + print( + u"Label: {}".format(enums.DependencyEdge.Label(dependency_edge.label).name) + ) + + # Get the language of the text, which will be the same as + # the language specified in the request or, if not specified, + # the automatically-detected language. + print(u"Language of the text: {}".format(response.language)) + + +# [END language_syntax_text] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("--text_content", type=str, default="This is a short sentence.") + args = parser.parse_args() + + sample_analyze_syntax(args.text_content) + + +if __name__ == "__main__": + main() diff --git a/language/v1/test/analyzing_entities.test.yaml b/language/v1/test/analyzing_entities.test.yaml new file mode 100644 index 000000000000..5fafd01eaa89 --- /dev/null +++ b/language/v1/test/analyzing_entities.test.yaml @@ -0,0 +1,101 @@ +type: test/samples +schema_version: 1 +test: + suites: + - name: "Analyzing Entities [code sample tests]" + cases: + + - name: language_entities_text - Analyzing the Entities of a text string (default value) + spec: + # Default value: "California is a state." + - call: {sample: language_entities_text} + - assert_contains: + - {literal: "Representative name for the entity: California"} + - {literal: "Entity type: LOCATION"} + - {literal: "Salience score:"} + - {literal: "wikipedia_url: https://en.wikipedia.org/wiki/California"} + - {literal: "mid: /m/01n7q"} + - {literal: "Mention text: California"} + - {literal: "Mention type: PROPER"} + - {literal: "Mention text: state"} + - {literal: "Mention type: COMMON"} + - {literal: "Language of the text: en"} + + - name: language_entities_text - Analyzing the Entities of a text string (*custom value*) + spec: + # Custom value: "Alice is a person. She lives in California." + - call: + sample: language_entities_text + params: + text_content: {literal: "Alice is a person. She lives in California."} + - assert_contains: + - {literal: "Representative name for the entity: Alice"} + - {literal: "Entity type: PERSON"} + - {literal: "Mention text: Alice"} + - {literal: "Mention type: PROPER"} + - {literal: "Mention text: person"} + - {literal: "Mention type: COMMON"} + - {literal: "Representative name for the entity: California"} + - {literal: "Entity type: LOCATION"} + - {literal: "wikipedia_url: https://en.wikipedia.org/wiki/California"} + - {literal: "mid: /m/01n7q"} + - {literal: "Language of the text: en"} + + - name: language_entities_text - Analyzing the Entities of a text string (*metadata attributes*) + spec: + # Try out some of the metadata attributes which should be available for dates, addresses, etc. + # In case fake (555) area code numbers don't work, using United States Naval Observatory number. + # Custom value: "I called 202-762-1401 on January 31, 2019 from 1600 Amphitheatre Parkway, Mountain View, CA." + - call: + sample: language_entities_text + params: + text_content: + literal: "I called 202-762-1401 on January 31, 2019 from 1600 Amphitheatre Parkway, Mountain View, CA." 
+ # The results may change, but it's fair to say that at least one of the following types were detected: + - assert_contains_any: + - literal: "Entity type: DATE" + - literal: "Entity type: ADDRESS" + - literal: "Entity type: PHONE_NUMBER" + # Check that at least some of the supporting metadata for an entity was present in the response + - assert_contains_any: + - literal: "month: 1" + - literal: "day: 31" + - literal: "year: 2019" + - literal: "street_number: 1600" + - literal: "street_name: Amphitheatre Parkway" + - literal: "area_code: 202" + - literal: "number: 7621401" + + - name: language_entities_gcs - Analyzing the Entities of text file in GCS (default value) + spec: + # Default value: gs://cloud-samples-data/language/entity.txt + # => "California is a state." + - call: {sample: language_entities_gcs} + - assert_contains: + - {literal: "Representative name for the entity: California"} + - {literal: "Entity type: LOCATION"} + - {literal: "Salience score:"} + - {literal: "wikipedia_url: https://en.wikipedia.org/wiki/California"} + - {literal: "mid: /m/01n7q"} + - {literal: "Mention text: California"} + - {literal: "Mention type: PROPER"} + - {literal: "Mention text: state"} + - {literal: "Mention type: COMMON"} + - {literal: "Language of the text: en"} + + - name: language_entities_gcs - Analyzing the Entities of text file in GCS (*custom value*) + spec: + # Use different file: gs://cloud-samples-data/language/entity-sentiment.txt + # => "Grapes are good. Bananas are bad." + - call: + sample: language_entities_gcs + params: + gcs_content_uri: + literal: "gs://cloud-samples-data/language/entity-sentiment.txt" + - assert_contains: + - {literal: "Representative name for the entity: Grapes"} + - {literal: "Mention text: Grapes"} + - {literal: "Mention type: COMMON"} + - {literal: "Representative name for the entity: Bananas"} + - {literal: "Mention text: Bananas"} + - {literal: "Language of the text: en"} diff --git a/language/v1/test/analyzing_entity_sentiment.test.yaml b/language/v1/test/analyzing_entity_sentiment.test.yaml new file mode 100644 index 000000000000..beb8fb4a89a7 --- /dev/null +++ b/language/v1/test/analyzing_entity_sentiment.test.yaml @@ -0,0 +1,63 @@ +type: test/samples +schema_version: 1 +test: + suites: + - name: "Analyzing Entity Sentiment [code sample tests]" + cases: + + - name: language_entity_sentiment_text - Analyzing Entity Sentiment of a text string (default value) + spec: + # Default value: "Grapes are good. Bananas are bad." + - call: {sample: language_entity_sentiment_text} + - assert_contains: + - {literal: "Representative name for the entity: Grapes"} + - {literal: "Entity sentiment score: 0."} + - {literal: "Representative name for the entity: Bananas"} + - {literal: "Entity sentiment score: -0."} + - {literal: "Entity sentiment magnitude: 0."} + - {literal: "Language of the text: en"} + + - name: language_entity_sentiment_text - Analyzing Entity Sentiment of a text string (*custom value*) + spec: + # Custom value: "Grapes are actually not very good. But Bananas are great." + - call: + sample: language_entity_sentiment_text + params: + text_content: {literal: "Grapes are actually not very good. 
But Bananas are great."} + - assert_contains: + - {literal: "Representative name for the entity: Grapes"} + - {literal: "Entity sentiment score: -0."} + - {literal: "Representative name for the entity: Bananas"} + - {literal: "Entity sentiment score: 0."} + - {literal: "Entity sentiment magnitude: 0."} + - {literal: "Language of the text: en"} + + - name: language_entity_sentiment_gcs - Analyzing Entity Sentiment of text file in GCS (default value) + spec: + # Default value: gs://cloud-samples-data/language/entity-sentiment.txt + # => "Grapes are good. Bananas are bad." + - call: {sample: language_entity_sentiment_gcs} + - assert_contains: + - {literal: "Representative name for the entity: Grapes"} + - {literal: "Entity sentiment score: -0."} + - {literal: "Representative name for the entity: Bananas"} + - {literal: "Entity sentiment score: 0."} + - {literal: "Entity sentiment magnitude: 0."} + - {literal: "Language of the text: en"} + + - name: language_entity_sentiment_gcs - Analyzing Entity Sentiment of text file in GCS (*custom value*) + spec: + # Use different file: gs://cloud-samples-data/language/entity-sentiment-reverse.txt + # => "Grapes are actually not very good. But Bananas are great." + - call: + sample: language_entity_sentiment_gcs + params: + gcs_content_uri: + literal: "gs://cloud-samples-data/language/entity-sentiment-reverse.txt" + - assert_contains: + - {literal: "Representative name for the entity: Grapes"} + - {literal: "Entity sentiment score: -0."} + - {literal: "Representative name for the entity: Bananas"} + - {literal: "Entity sentiment score: 0."} + - {literal: "Entity sentiment magnitude: 0."} + - {literal: "Language of the text: en"} diff --git a/language/v1/test/analyzing_sentiment.test.yaml b/language/v1/test/analyzing_sentiment.test.yaml new file mode 100644 index 000000000000..55b5fdcb24d2 --- /dev/null +++ b/language/v1/test/analyzing_sentiment.test.yaml @@ -0,0 +1,74 @@ +type: test/samples +schema_version: 1 +test: + suites: + - name: "Analyzing Sentiment [code sample tests]" + cases: + + - name: language_sentiment_text - Analyzing the sentiment of a text string (default value) + spec: + # Default value: "I am so happy and joyful." + - call: {sample: language_sentiment_text} + - assert_contains: + - {literal: "Document sentiment score: 0."} + - {literal: "Document sentiment magnitude: 0."} + - {literal: "Sentence text: I am so happy and joyful."} + - {literal: "Sentence sentiment score: 0."} + - {literal: "Sentence sentiment magnitude: 0."} + - {literal: "Language of the text: en"} + # There should be no negative sentiment scores for this value. + - assert_not_contains: + - {literal: "Document sentiment score: -0."} + - {literal: "Sentence sentiment score: -0."} + + - name: language_sentiment_text - Analyzing the sentiment of a text string (*custom value*) + spec: + # Custom value: "I am very happy. I am angry and sad." + - call: + sample: language_sentiment_text + params: + text_content: {literal: "I am very happy. I am angry and sad."} + - assert_contains: + - {literal: "Sentence text: I am very happy"} + - {literal: "Sentence sentiment score: 0."} + - {literal: "Sentence text: I am angry and sad"} + - {literal: "Sentence sentiment score: -0."} + - {literal: "Language of the text: en"} + + - name: language_sentiment_gcs - Analyzing the sentiment of text file in GCS (default value) + spec: + # Default value: gs://cloud-samples-data/language/sentiment-positive.txt + # => "I am so happy and joyful." 
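+        # (Sentiment score lies in [-1.0, 1.0] and magnitude is non-negative,
+        # so the "0." and "-0." literals below effectively assert only the
+        # sign of each value, not an exact score.)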
+ - call: {sample: language_sentiment_gcs} + - assert_contains: + - {literal: "Document sentiment score: 0."} + - {literal: "Document sentiment magnitude: 0."} + - {literal: "Sentence text: I am so happy and joyful."} + - {literal: "Sentence sentiment score: 0."} + - {literal: "Sentence sentiment magnitude: 0."} + - {literal: "Language of the text: en"} + # There should be no negative sentiment scores for this value. + - assert_not_contains: + - {literal: "Document sentiment score: -0."} + - {literal: "Sentence sentiment score: -0."} + + - name: language_sentiment_gcs - Analyzing the sentiment of text file in GCS (*custom value*) + spec: + # Use different file: gs://cloud-samples-data/language/sentiment-negative.txt + # => "I am so sad and upset." + - call: + sample: language_sentiment_gcs + params: + gcs_content_uri: + literal: "gs://cloud-samples-data/language/sentiment-negative.txt" + - assert_contains: + - {literal: "Document sentiment score: -0."} + - {literal: "Document sentiment magnitude: 0."} + - {literal: "Sentence text: I am so sad and upset."} + - {literal: "Sentence sentiment score: -0."} + - {literal: "Sentence sentiment magnitude: 0."} + - {literal: "Language of the text: en"} + # There should be no positive sentiment scores for this value. + - assert_not_contains: + - {literal: "Document sentiment score: 0."} + - {literal: "Sentence sentiment score: 0."} diff --git a/language/v1/test/analyzing_syntax.test.yaml b/language/v1/test/analyzing_syntax.test.yaml new file mode 100644 index 000000000000..e89d465c1616 --- /dev/null +++ b/language/v1/test/analyzing_syntax.test.yaml @@ -0,0 +1,72 @@ +type: test/samples +schema_version: 1 +test: + suites: + - name: "Analyzing Syntax [code sample tests]" + cases: + + - name: language_syntax_text - Analyzing the syntax of a text string (default value) + spec: + # Default value: "This is a short sentence." + - call: {sample: language_syntax_text} + - assert_contains: + - {literal: "Token text: is"} + - {literal: "Part of Speech tag: VERB"} + - {literal: "Tense: PRESENT"} + - {literal: "Lemma: be"} + - {literal: "Token text: short"} + - {literal: "Part of Speech tag: ADJ"} + - {literal: "Lemma: short"} + - {literal: "Language of the text: en"} + + - name: language_syntax_text - Analyzing the syntax of a text string (*custom value*) + spec: + # Custom value: "Alice runs. Bob ran." + - call: + sample: language_syntax_text + params: + text_content: {literal: "Alice runs. Bob ran."} + - assert_contains: + - {literal: "Token text: Alice"} + - {literal: "Location of this token in overall document: 0"} + - {literal: "Part of Speech tag: NOUN"} + - {literal: "Label: NSUBJ"} + - {literal: "Token text: runs"} + - {literal: "Part of Speech tag: VERB"} + - {literal: "Tense: PRESENT"} + - {literal: "Lemma: run"} + - {literal: "Token text: ran"} + - {literal: "Tense: PAST"} + - {literal: "Language of the text: en"} + + - name: language_syntax_gcs - Analyzing the syntax of text file in GCS (default value) + spec: + # Default value: gs://cloud-samples-data/language/syntax-sentence.txt + # => "This is a short sentence." 
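+        # (This file holds the same sentence as the language_syntax_text
+        # default above, so the token-level assertions are identical.)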
+        - call: {sample: language_syntax_gcs}
+        - assert_contains:
+          - {literal: "Token text: is"}
+          - {literal: "Part of Speech tag: VERB"}
+          - {literal: "Tense: PRESENT"}
+          - {literal: "Lemma: be"}
+          - {literal: "Token text: short"}
+          - {literal: "Part of Speech tag: ADJ"}
+          - {literal: "Lemma: short"}
+          - {literal: "Language of the text: en"}
+
+    - name: language_syntax_gcs - Analyzing the syntax of text file in GCS (*custom value*)
+      spec:
+        # Use different file: gs://cloud-samples-data/language/hello.txt
+        # => "Hello, world!"
+        - call:
+            sample: language_syntax_gcs
+            params:
+              gcs_content_uri:
+                literal: "gs://cloud-samples-data/language/hello.txt"
+        - assert_contains:
+          - {literal: "Token text: Hello"}
+          - {literal: "Token text: World"}
+          - {literal: "Part of Speech tag: NOUN"}
+          - {literal: "Token text: !"}
+          - {literal: "Part of Speech tag: PUNCT"}
+          - {literal: "Language of the text: en"}
diff --git a/language/v1/test/classifying_content.test.yaml b/language/v1/test/classifying_content.test.yaml
new file mode 100644
index 000000000000..5cfc76696b25
--- /dev/null
+++ b/language/v1/test/classifying_content.test.yaml
@@ -0,0 +1,51 @@
+type: test/samples
+schema_version: 1
+test:
+  suites:
+  - name: "Classifying Content [code sample tests]"
+    cases:
+
+    - name: language_classify_text - Classifying Content of a text string (default value)
+      spec:
+        # Default value: "That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows."
+        - call: {sample: language_classify_text}
+        - assert_contains_any:
+          - {literal: "TV"}
+          - {literal: "Movies"}
+          - {literal: "Entertainment"}
+
+    - name: language_classify_text - Classifying Content of a text string (*custom value*)
+      spec:
+        # Custom value: "Let's drink coffee and eat bagels at a coffee shop. I want muffins, croissants, coffee and baked goods."
+        - call:
+            sample: language_classify_text
+            params:
+              text_content: {literal: "Let's drink coffee and eat bagels at a coffee shop. I want muffins, croissants, coffee and baked goods."}
+        - assert_contains_any:
+          - {literal: "Food"}
+          - {literal: "Drink"}
+          - {literal: "Coffee"}
+
+    - name: language_classify_gcs - Classifying Content of text file in GCS (default value)
+      spec:
+        # Default value: gs://cloud-samples-data/language/classify-entertainment.txt
+        # => "This is about film and movies and television and acting and movie theatres and theatre and drama and entertainment and the arts."
+        - call: {sample: language_classify_gcs}
+        - assert_contains_any:
+          - {literal: "TV"}
+          - {literal: "Movies"}
+          - {literal: "Entertainment"}
+
+    - name: language_classify_gcs - Classifying Content of text file in GCS (*custom value*)
+      spec:
+        # Use different file: gs://cloud-samples-data/language/android.txt
+        # => "Android is a mobile operating system developed by Google, based on the Linux kernel and..."
+        - call:
+            sample: language_classify_gcs
+            params:
+              gcs_content_uri:
+                literal: "gs://cloud-samples-data/language/android.txt"
+        - assert_contains_any:
+          - {literal: "Mobile"}
+          - {literal: "Phone"}
+          - {literal: "Internet"}
diff --git a/language/v1/test/samples.manifest.yaml b/language/v1/test/samples.manifest.yaml
new file mode 100644
index 000000000000..aa270425584c
--- /dev/null
+++ b/language/v1/test/samples.manifest.yaml
@@ -0,0 +1,38 @@
+type: manifest/samples
+schema_version: 3
+base: &common
+  env: 'python'
+  bin: 'python3'
+  chdir: '{@manifest_dir}/../..'
+  basepath: '.'
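+# Each entry under "samples" merges these shared settings via the YAML merge
+# key ("<<: *common"), so a sample only declares its own path and sample id;
+# assuming standard anchor/merge semantics, each entry expands to include env,
+# bin, chdir, and basepath from the &common mapping above.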
+samples: +- <<: *common + path: '{basepath}/v1/language_classify_gcs.py' + sample: 'language_classify_gcs' +- <<: *common + path: '{basepath}/v1/language_classify_text.py' + sample: 'language_classify_text' +- <<: *common + path: '{basepath}/v1/language_entities_gcs.py' + sample: 'language_entities_gcs' +- <<: *common + path: '{basepath}/v1/language_entities_text.py' + sample: 'language_entities_text' +- <<: *common + path: '{basepath}/v1/language_entity_sentiment_gcs.py' + sample: 'language_entity_sentiment_gcs' +- <<: *common + path: '{basepath}/v1/language_entity_sentiment_text.py' + sample: 'language_entity_sentiment_text' +- <<: *common + path: '{basepath}/v1/language_sentiment_gcs.py' + sample: 'language_sentiment_gcs' +- <<: *common + path: '{basepath}/v1/language_sentiment_text.py' + sample: 'language_sentiment_text' +- <<: *common + path: '{basepath}/v1/language_syntax_gcs.py' + sample: 'language_syntax_gcs' +- <<: *common + path: '{basepath}/v1/language_syntax_text.py' + sample: 'language_syntax_text' From 66f4f4cae0ce8943b369a674025e3595debb2b1a Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 11 Sep 2019 09:20:38 -0700 Subject: [PATCH 002/323] Reorder samples manifest (via synth). (#9209) --- language/v1/test/samples.manifest.yaml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/language/v1/test/samples.manifest.yaml b/language/v1/test/samples.manifest.yaml index aa270425584c..28d2760ff3db 100644 --- a/language/v1/test/samples.manifest.yaml +++ b/language/v1/test/samples.manifest.yaml @@ -6,33 +6,33 @@ base: &common chdir: '{@manifest_dir}/../..' basepath: '.' samples: +- <<: *common + path: '{basepath}/v1/language_entity_sentiment_gcs.py' + sample: 'language_entity_sentiment_gcs' - <<: *common path: '{basepath}/v1/language_classify_gcs.py' sample: 'language_classify_gcs' - <<: *common - path: '{basepath}/v1/language_classify_text.py' - sample: 'language_classify_text' -- <<: *common - path: '{basepath}/v1/language_entities_gcs.py' - sample: 'language_entities_gcs' + path: '{basepath}/v1/language_syntax_gcs.py' + sample: 'language_syntax_gcs' - <<: *common path: '{basepath}/v1/language_entities_text.py' sample: 'language_entities_text' - <<: *common - path: '{basepath}/v1/language_entity_sentiment_gcs.py' - sample: 'language_entity_sentiment_gcs' + path: '{basepath}/v1/language_classify_text.py' + sample: 'language_classify_text' +- <<: *common + path: '{basepath}/v1/language_syntax_text.py' + sample: 'language_syntax_text' - <<: *common path: '{basepath}/v1/language_entity_sentiment_text.py' sample: 'language_entity_sentiment_text' +- <<: *common + path: '{basepath}/v1/language_entities_gcs.py' + sample: 'language_entities_gcs' - <<: *common path: '{basepath}/v1/language_sentiment_gcs.py' sample: 'language_sentiment_gcs' - <<: *common path: '{basepath}/v1/language_sentiment_text.py' sample: 'language_sentiment_text' -- <<: *common - path: '{basepath}/v1/language_syntax_gcs.py' - sample: 'language_syntax_gcs' -- <<: *common - path: '{basepath}/v1/language_syntax_text.py' - sample: 'language_syntax_text' From ee0fa30288630961dd44e309aa44bbf9c9d9c64f Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 26 Sep 2019 10:14:14 -0700 Subject: [PATCH 003/323] codegen(language): reorder samples (#9310) --- language/v1/test/samples.manifest.yaml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/language/v1/test/samples.manifest.yaml b/language/v1/test/samples.manifest.yaml 
index 28d2760ff3db..aa270425584c 100644 --- a/language/v1/test/samples.manifest.yaml +++ b/language/v1/test/samples.manifest.yaml @@ -6,33 +6,33 @@ base: &common chdir: '{@manifest_dir}/../..' basepath: '.' samples: -- <<: *common - path: '{basepath}/v1/language_entity_sentiment_gcs.py' - sample: 'language_entity_sentiment_gcs' - <<: *common path: '{basepath}/v1/language_classify_gcs.py' sample: 'language_classify_gcs' - <<: *common - path: '{basepath}/v1/language_syntax_gcs.py' - sample: 'language_syntax_gcs' + path: '{basepath}/v1/language_classify_text.py' + sample: 'language_classify_text' +- <<: *common + path: '{basepath}/v1/language_entities_gcs.py' + sample: 'language_entities_gcs' - <<: *common path: '{basepath}/v1/language_entities_text.py' sample: 'language_entities_text' - <<: *common - path: '{basepath}/v1/language_classify_text.py' - sample: 'language_classify_text' -- <<: *common - path: '{basepath}/v1/language_syntax_text.py' - sample: 'language_syntax_text' + path: '{basepath}/v1/language_entity_sentiment_gcs.py' + sample: 'language_entity_sentiment_gcs' - <<: *common path: '{basepath}/v1/language_entity_sentiment_text.py' sample: 'language_entity_sentiment_text' -- <<: *common - path: '{basepath}/v1/language_entities_gcs.py' - sample: 'language_entities_gcs' - <<: *common path: '{basepath}/v1/language_sentiment_gcs.py' sample: 'language_sentiment_gcs' - <<: *common path: '{basepath}/v1/language_sentiment_text.py' sample: 'language_sentiment_text' +- <<: *common + path: '{basepath}/v1/language_syntax_gcs.py' + sample: 'language_syntax_gcs' +- <<: *common + path: '{basepath}/v1/language_syntax_text.py' + sample: 'language_syntax_text' From 270bad5d5774be7fc59ea97eafc4e2f703c6e736 Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Thu, 16 Jan 2020 05:57:52 -0800 Subject: [PATCH 004/323] docs(language): fixes typo in Natural Language samples (#10134) Changes "Parts of spech" to "Parts of speech". --- language/v1/language_syntax_gcs.py | 2 +- language/v1/language_syntax_text.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/language/v1/language_syntax_gcs.py b/language/v1/language_syntax_gcs.py index 74d88787d426..732f77df2a92 100644 --- a/language/v1/language_syntax_gcs.py +++ b/language/v1/language_syntax_gcs.py @@ -64,7 +64,7 @@ def sample_analyze_syntax(gcs_content_uri): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of spech are as defined in: + # Parts of speech are as defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. diff --git a/language/v1/language_syntax_text.py b/language/v1/language_syntax_text.py index 4b11d4d04b8f..d1c3104ea890 100644 --- a/language/v1/language_syntax_text.py +++ b/language/v1/language_syntax_text.py @@ -63,7 +63,7 @@ def sample_analyze_syntax(text_content): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of spech are as defined in: + # Parts of speech are as defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. 
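For readers following the series, the token loop that this typo fix keeps touching boils down to the following self-contained sketch. It assumes the same pre-1.0 `google-cloud-language` client and `enums` module that all of these generated samples import; the input sentence is a placeholder.

```python
# Minimal sketch of the analyze-syntax flow patched above. Assumes the
# pre-1.0 google-cloud-language client with its enums module; the input
# sentence is a placeholder, everything else mirrors the generated sample.
from google.cloud import language_v1
from google.cloud.language_v1 import enums

client = language_v1.LanguageServiceClient()
document = {
    "content": "The quick brown fox jumped over the lazy dog.",
    "type": enums.Document.Type.PLAIN_TEXT,
    "language": "en",
}
response = client.analyze_syntax(document, encoding_type=enums.EncodingType.UTF8)
for token in response.tokens:
    # Parts of speech are as defined in the LREC 2012 tagset cited in the
    # comment being corrected above.
    tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag).name
    print(u"{}: {}".format(token.text.content, tag))
```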
From bf04a971638331efd6b0f332b8654774dce16a6a Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 29 Jan 2020 16:52:45 -0800 Subject: [PATCH 005/323] docs(language): change docstring formatting; bump copyright year to 2020 (via synth) (#10234) --- language/v1/language_classify_gcs.py | 2 +- language/v1/language_classify_text.py | 2 +- language/v1/language_entities_gcs.py | 2 +- language/v1/language_entities_text.py | 2 +- language/v1/language_entity_sentiment_gcs.py | 2 +- language/v1/language_entity_sentiment_text.py | 2 +- language/v1/language_sentiment_gcs.py | 2 +- language/v1/language_sentiment_text.py | 2 +- language/v1/language_syntax_gcs.py | 4 ++-- language/v1/language_syntax_text.py | 4 ++-- 10 files changed, 12 insertions(+), 12 deletions(-) diff --git a/language/v1/language_classify_gcs.py b/language/v1/language_classify_gcs.py index db5958011cfe..941640b10772 100644 --- a/language/v1/language_classify_gcs.py +++ b/language/v1/language_classify_gcs.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/language/v1/language_classify_text.py b/language/v1/language_classify_text.py index 2ecfd70bd588..52175f02db7a 100644 --- a/language/v1/language_classify_text.py +++ b/language/v1/language_classify_text.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/language/v1/language_entities_gcs.py b/language/v1/language_entities_gcs.py index edd3238ac88a..790592ca158e 100644 --- a/language/v1/language_entities_gcs.py +++ b/language/v1/language_entities_gcs.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/language/v1/language_entities_text.py b/language/v1/language_entities_text.py index 2948f44d3500..9ae849f2d5f6 100644 --- a/language/v1/language_entities_text.py +++ b/language/v1/language_entities_text.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/language/v1/language_entity_sentiment_gcs.py b/language/v1/language_entity_sentiment_gcs.py index 87fb74de789e..9fafa737e5a4 100644 --- a/language/v1/language_entity_sentiment_gcs.py +++ b/language/v1/language_entity_sentiment_gcs.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/language/v1/language_entity_sentiment_text.py b/language/v1/language_entity_sentiment_text.py index 6f914980a2e6..9b3d5b8a897f 100644 --- a/language/v1/language_entity_sentiment_text.py +++ b/language/v1/language_entity_sentiment_text.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/language/v1/language_sentiment_gcs.py b/language/v1/language_sentiment_gcs.py index 366009668ba3..261f2f3e6233 100644 --- a/language/v1/language_sentiment_gcs.py +++ b/language/v1/language_sentiment_gcs.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/language/v1/language_sentiment_text.py b/language/v1/language_sentiment_text.py index c1325678bf39..12f1e22113c0 100644 --- a/language/v1/language_sentiment_text.py +++ b/language/v1/language_sentiment_text.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/language/v1/language_syntax_gcs.py b/language/v1/language_syntax_gcs.py index 732f77df2a92..32bf2acb589e 100644 --- a/language/v1/language_syntax_gcs.py +++ b/language/v1/language_syntax_gcs.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -64,7 +64,7 @@ def sample_analyze_syntax(gcs_content_uri): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of speech are as defined in: + # Parts of spech are as defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. diff --git a/language/v1/language_syntax_text.py b/language/v1/language_syntax_text.py index d1c3104ea890..290418864675 100644 --- a/language/v1/language_syntax_text.py +++ b/language/v1/language_syntax_text.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -63,7 +63,7 @@ def sample_analyze_syntax(text_content): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of speech are as defined in: + # Parts of spech are as defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. From e1652923b975d1c194c321aba6a2032ac9bcf605 Mon Sep 17 00:00:00 2001 From: Cameron Zahedi Date: Wed, 12 Feb 2020 11:13:04 -0700 Subject: [PATCH 006/323] docs: fix small typo (#5) --- language/v1/language_syntax_text.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/v1/language_syntax_text.py b/language/v1/language_syntax_text.py index 290418864675..2b4a51e28cc0 100644 --- a/language/v1/language_syntax_text.py +++ b/language/v1/language_syntax_text.py @@ -63,7 +63,7 @@ def sample_analyze_syntax(text_content): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of spech are as defined in: + # Parts of speech are as defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. 
NOUN, ADJ for Adjective, et al. From f8bb8206ea3e36f99e252987a979bcfd18aadc56 Mon Sep 17 00:00:00 2001 From: Jonathan Simon Date: Mon, 16 Mar 2020 09:04:15 -0700 Subject: [PATCH 007/323] chore: update samples to include additional region tags (#14) * Update samples to include additional region tags. * chore: empty commit --- language/v1/language_classify_gcs.py | 2 ++ language/v1/language_classify_text.py | 2 ++ language/v1/language_entities_text.py | 2 ++ language/v1/language_sentiment_text.py | 3 +++ language/v1/language_syntax_text.py | 3 +++ 5 files changed, 12 insertions(+) diff --git a/language/v1/language_classify_gcs.py b/language/v1/language_classify_gcs.py index 941640b10772..8835fc769c84 100644 --- a/language/v1/language_classify_gcs.py +++ b/language/v1/language_classify_gcs.py @@ -43,6 +43,7 @@ def sample_classify_text(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/classify-entertainment.txt' + # [START language_python_migration_document_gcs] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -51,6 +52,7 @@ def sample_classify_text(gcs_content_uri): # https://cloud.google.com/natural-language/docs/languages language = "en" document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + # [END language_python_migration_document_gcs] response = client.classify_text(document) # Loop through classified categories returned from the API diff --git a/language/v1/language_classify_text.py b/language/v1/language_classify_text.py index 52175f02db7a..4fc77b20048a 100644 --- a/language/v1/language_classify_text.py +++ b/language/v1/language_classify_text.py @@ -41,6 +41,7 @@ def sample_classify_text(text_content): # text_content = 'That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows.' + # [START language_python_migration_document_text] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -49,6 +50,7 @@ def sample_classify_text(text_content): # https://cloud.google.com/natural-language/docs/languages language = "en" document = {"content": text_content, "type": type_, "language": language} + # [END language_python_migration_document_text] response = client.classify_text(document) # Loop through classified categories returned from the API diff --git a/language/v1/language_entities_text.py b/language/v1/language_entities_text.py index 9ae849f2d5f6..c6149f656d0e 100644 --- a/language/v1/language_entities_text.py +++ b/language/v1/language_entities_text.py @@ -41,6 +41,7 @@ def sample_analyze_entities(text_content): # text_content = 'California is a state.' + # [START language_python_migration_entities_text] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -76,6 +77,7 @@ def sample_analyze_entities(text_content): print( u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) ) + # [END language_python_migration_entities_text] # Get the language of the text, which will be the same as # the language specified in the request or, if not specified, diff --git a/language/v1/language_sentiment_text.py b/language/v1/language_sentiment_text.py index 12f1e22113c0..a0647678dc1f 100644 --- a/language/v1/language_sentiment_text.py +++ b/language/v1/language_sentiment_text.py @@ -41,6 +41,7 @@ def sample_analyze_sentiment(text_content): # text_content = 'I am so happy and joyful.' 
+ # [START language_python_migration_sentiment_text] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -61,6 +62,8 @@ def sample_analyze_sentiment(text_content): response.document_sentiment.magnitude ) ) + # [END language_python_migration_sentiment_text] + # Get sentiment for all sentences in the document for sentence in response.sentences: print(u"Sentence text: {}".format(sentence.text.content)) diff --git a/language/v1/language_syntax_text.py b/language/v1/language_syntax_text.py index 2b4a51e28cc0..d57c9eeaf1d4 100644 --- a/language/v1/language_syntax_text.py +++ b/language/v1/language_syntax_text.py @@ -50,6 +50,7 @@ def sample_analyze_syntax(text_content): language = "en" document = {"content": text_content, "type": type_, "language": language} + # [START language_python_migration_syntax_text] # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = enums.EncodingType.UTF8 @@ -72,6 +73,8 @@ def sample_analyze_syntax(text_content): enums.PartOfSpeech.Tag(part_of_speech.tag).name ) ) + # [END language_python_migration_syntax_text] + # Get the voice, e.g. ACTIVE or PASSIVE print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name)) # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. From c9a98314b76ce43e93fd078d1f77828412e83cad Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 21 Apr 2020 16:51:03 -0700 Subject: [PATCH 008/323] chore: increase default timeout; update templates (via synth) (#16) --- language/v1/language_classify_gcs.py | 2 -- language/v1/language_classify_text.py | 2 -- language/v1/language_entities_text.py | 2 -- language/v1/language_sentiment_text.py | 3 --- language/v1/language_syntax_text.py | 5 +---- 5 files changed, 1 insertion(+), 13 deletions(-) diff --git a/language/v1/language_classify_gcs.py b/language/v1/language_classify_gcs.py index 8835fc769c84..941640b10772 100644 --- a/language/v1/language_classify_gcs.py +++ b/language/v1/language_classify_gcs.py @@ -43,7 +43,6 @@ def sample_classify_text(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/classify-entertainment.txt' - # [START language_python_migration_document_gcs] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -52,7 +51,6 @@ def sample_classify_text(gcs_content_uri): # https://cloud.google.com/natural-language/docs/languages language = "en" document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} - # [END language_python_migration_document_gcs] response = client.classify_text(document) # Loop through classified categories returned from the API diff --git a/language/v1/language_classify_text.py b/language/v1/language_classify_text.py index 4fc77b20048a..52175f02db7a 100644 --- a/language/v1/language_classify_text.py +++ b/language/v1/language_classify_text.py @@ -41,7 +41,6 @@ def sample_classify_text(text_content): # text_content = 'That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows.' 
- # [START language_python_migration_document_text] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -50,7 +49,6 @@ def sample_classify_text(text_content): # https://cloud.google.com/natural-language/docs/languages language = "en" document = {"content": text_content, "type": type_, "language": language} - # [END language_python_migration_document_text] response = client.classify_text(document) # Loop through classified categories returned from the API diff --git a/language/v1/language_entities_text.py b/language/v1/language_entities_text.py index c6149f656d0e..9ae849f2d5f6 100644 --- a/language/v1/language_entities_text.py +++ b/language/v1/language_entities_text.py @@ -41,7 +41,6 @@ def sample_analyze_entities(text_content): # text_content = 'California is a state.' - # [START language_python_migration_entities_text] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -77,7 +76,6 @@ def sample_analyze_entities(text_content): print( u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) ) - # [END language_python_migration_entities_text] # Get the language of the text, which will be the same as # the language specified in the request or, if not specified, diff --git a/language/v1/language_sentiment_text.py b/language/v1/language_sentiment_text.py index a0647678dc1f..12f1e22113c0 100644 --- a/language/v1/language_sentiment_text.py +++ b/language/v1/language_sentiment_text.py @@ -41,7 +41,6 @@ def sample_analyze_sentiment(text_content): # text_content = 'I am so happy and joyful.' - # [START language_python_migration_sentiment_text] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -62,8 +61,6 @@ def sample_analyze_sentiment(text_content): response.document_sentiment.magnitude ) ) - # [END language_python_migration_sentiment_text] - # Get sentiment for all sentences in the document for sentence in response.sentences: print(u"Sentence text: {}".format(sentence.text.content)) diff --git a/language/v1/language_syntax_text.py b/language/v1/language_syntax_text.py index d57c9eeaf1d4..290418864675 100644 --- a/language/v1/language_syntax_text.py +++ b/language/v1/language_syntax_text.py @@ -50,7 +50,6 @@ def sample_analyze_syntax(text_content): language = "en" document = {"content": text_content, "type": type_, "language": language} - # [START language_python_migration_syntax_text] # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = enums.EncodingType.UTF8 @@ -64,7 +63,7 @@ def sample_analyze_syntax(text_content): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of speech are as defined in: + # Parts of spech are as defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. @@ -73,8 +72,6 @@ def sample_analyze_syntax(text_content): enums.PartOfSpeech.Tag(part_of_speech.tag).name ) ) - # [END language_python_migration_syntax_text] - # Get the voice, e.g. ACTIVE or PASSIVE print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name)) # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. 
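The region tags that patch 007 added and this synth run removed bracket boilerplate shared by every sample. The sentiment sample they touch reduces to the sketch below, which prints a document-level score followed by per-sentence scores; same client assumptions as the sketch above, and the input string is the sample's own placeholder.

```python
# Sketch of language_sentiment_text.py around the added/removed region tags.
from google.cloud import language_v1
from google.cloud.language_v1 import enums

client = language_v1.LanguageServiceClient()
document = {
    "content": "I am so happy and joyful.",
    "type": enums.Document.Type.PLAIN_TEXT,
    "language": "en",
}
response = client.analyze_sentiment(document)
# Overall sentiment of the input document.
print(u"Document sentiment score: {}".format(response.document_sentiment.score))
print(u"Document sentiment magnitude: {}".format(
    response.document_sentiment.magnitude))
# Sentiment for each individual sentence.
for sentence in response.sentences:
    print(u"{} => {}".format(sentence.text.content, sentence.sentiment.score))
```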
From 067fd1a557447308bcfe926845a8424abb47bd26 Mon Sep 17 00:00:00 2001 From: Emily Darrow <47046797+ejdarrow@users.noreply.github.com> Date: Mon, 22 Jun 2020 16:50:03 -0400 Subject: [PATCH 009/323] docs: add spacing for readability (#22) --- language/v1/language_entities_text.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/language/v1/language_entities_text.py b/language/v1/language_entities_text.py index 9ae849f2d5f6..464a313d3029 100644 --- a/language/v1/language_entities_text.py +++ b/language/v1/language_entities_text.py @@ -54,13 +54,17 @@ def sample_analyze_entities(text_content): encoding_type = enums.EncodingType.UTF8 response = client.analyze_entities(document, encoding_type=encoding_type) + # Loop through entitites returned from the API for entity in response.entities: print(u"Representative name for the entity: {}".format(entity.name)) + # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) + # Loop over the metadata associated with entity. For many known entities, # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid). # Some entity types may have additional metadata, e.g. ADDRESS entities @@ -72,6 +76,7 @@ def sample_analyze_entities(text_content): # The API currently supports proper noun mentions. for mention in entity.mentions: print(u"Mention text: {}".format(mention.text.content)) + # Get the mention type, e.g. PROPER for proper noun print( u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) From 136793a43927d4d0446ea16ab4ea0bb88690290b Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Thu, 21 Jul 2016 16:16:19 -0700 Subject: [PATCH 010/323] Adding Natural Language API samples. 
Change-Id: I68a1b5a11c2b3703963466b195be37a2c796bf79 --- language/snippets/README.md | 17 + language/snippets/api/README.md | 87 ++++ language/snippets/api/analyze.py | 115 ++++++ language/snippets/api/analyze_test.py | 258 ++++++++++++ language/snippets/api/requirements.txt | 1 + language/snippets/movie_nl/README.md | 152 +++++++ language/snippets/movie_nl/main.py | 383 ++++++++++++++++++ language/snippets/movie_nl/main_test.py | 128 ++++++ language/snippets/movie_nl/requirements.txt | 2 + language/snippets/ocr_nl/README.md | 227 +++++++++++ language/snippets/ocr_nl/main.py | 362 +++++++++++++++++ language/snippets/ocr_nl/main_test.py | 97 +++++ language/snippets/ocr_nl/requirements.txt | 1 + language/snippets/syntax_triples/README.md | 91 +++++ language/snippets/syntax_triples/main.py | 180 ++++++++ language/snippets/syntax_triples/main_test.py | 50 +++ .../snippets/syntax_triples/requirements.txt | 1 + .../resources/obama_wikipedia.txt | 1 + 18 files changed, 2153 insertions(+) create mode 100644 language/snippets/README.md create mode 100644 language/snippets/api/README.md create mode 100644 language/snippets/api/analyze.py create mode 100644 language/snippets/api/analyze_test.py create mode 100644 language/snippets/api/requirements.txt create mode 100644 language/snippets/movie_nl/README.md create mode 100644 language/snippets/movie_nl/main.py create mode 100644 language/snippets/movie_nl/main_test.py create mode 100644 language/snippets/movie_nl/requirements.txt create mode 100644 language/snippets/ocr_nl/README.md create mode 100755 language/snippets/ocr_nl/main.py create mode 100755 language/snippets/ocr_nl/main_test.py create mode 100644 language/snippets/ocr_nl/requirements.txt create mode 100644 language/snippets/syntax_triples/README.md create mode 100644 language/snippets/syntax_triples/main.py create mode 100755 language/snippets/syntax_triples/main_test.py create mode 100644 language/snippets/syntax_triples/requirements.txt create mode 100644 language/snippets/syntax_triples/resources/obama_wikipedia.txt diff --git a/language/snippets/README.md b/language/snippets/README.md new file mode 100644 index 000000000000..e63d45eb9a6a --- /dev/null +++ b/language/snippets/README.md @@ -0,0 +1,17 @@ +# Google Cloud Natural Language API examples + +This directory contains Python examples that use the +[Google Cloud Natural Language API](https://cloud.google.com/natural-language/). + +- [api](api) has a simple command line tool that shows off the API's features. + +- [movie_nl](movie_nl) combines sentiment and entity analysis to come up with +actors/directors who are the most and least popular in the imdb movie reviews. + +- [ocr_nl](ocr_nl) uses the [Cloud Vision API](https://cloud.google.com/vision/) +to extract text from images, then uses the NL API to extract entity information +from those texts, and stores the extracted information in a database in support +of further analysis and correlation. + +- [syntax_triples](syntax_triples) uses syntax analysis to find +subject-verb-object triples in a given piece of text. diff --git a/language/snippets/api/README.md b/language/snippets/api/README.md new file mode 100644 index 000000000000..9625df30c89f --- /dev/null +++ b/language/snippets/api/README.md @@ -0,0 +1,87 @@ + +# Google Cloud Natural Language API Sample + +This Python sample demonstrates the use of the [Google Cloud Natural Language API][NL-Docs] +for sentiment, entity, and syntax analysis. 
+ +[NL-Docs]: https://cloud.google.com/natural-language/docs/ + +## Setup + +Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) +steps in the Quickstart doc to create a project and enable the +Cloud Natural Language API. Following those steps, make sure that you +[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), +and export the following environment variable: + +``` +export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json +``` + +## Run the sample + +Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. + +To run the example, install the necessary libraries using pip: + +```sh +$ pip install -r requirements.txt +``` + +Then, run the script: + +```sh +$ python analyze.py +``` + +where `` is one of: `entities`, `sentiment`, or `syntax`. + +The script will write to STDOUT the json returned from the API for the requested feature. + +For example, if you run: + +```sh +$ python analyze.py entities "Tom Sawyer is a book written by a guy known as Mark Twain." +``` + +You will see something like the following returned: + +``` +{ + "entities": [ + { + "salience": 0.49785897, + "mentions": [ + { + "text": { + "content": "Tom Sawyer", + "beginOffset": 0 + } + } + ], + "type": "PERSON", + "name": "Tom Sawyer", + "metadata": { + "wikipedia_url": "http://en.wikipedia.org/wiki/The_Adventures_of_Tom_Sawyer" + } + }, + { + "salience": 0.12209519, + "mentions": [ + { + "text": { + "content": "Mark Twain", + "beginOffset": 47 + } + } + ], + "type": "PERSON", + "name": "Mark Twain", + "metadata": { + "wikipedia_url": "http://en.wikipedia.org/wiki/Mark_Twain" + } + } + ], + "language": "en" +} +``` diff --git a/language/snippets/api/analyze.py b/language/snippets/api/analyze.py new file mode 100644 index 000000000000..73e892c354a1 --- /dev/null +++ b/language/snippets/api/analyze.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python + +# Copyright 2016 Google, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Analyzes text using the Google Cloud Natural Language API.""" + +import argparse +import json +import sys + +from googleapiclient import discovery +import httplib2 +from oauth2client.client import GoogleCredentials + + +def get_service(): + credentials = GoogleCredentials.get_application_default() + scoped_credentials = credentials.create_scoped( + ['https://www.googleapis.com/auth/cloud-platform']) + http = httplib2.Http() + scoped_credentials.authorize(http) + return discovery.build('language', 'v1beta1', http=http) + + +def get_native_encoding_type(): + """Returns the encoding type that matches Python's native strings.""" + if sys.maxunicode == 65535: + return 'UTF16' + else: + return 'UTF32' + + +def analyze_entities(text, encoding='UTF32'): + body = { + 'document': { + 'type': 'PLAIN_TEXT', + 'content': text, + }, + 'encodingType': encoding, + } + + service = get_service() + + request = service.documents().analyzeEntities(body=body) + response = request.execute() + + return response + + +def analyze_sentiment(text): + body = { + 'document': { + 'type': 'PLAIN_TEXT', + 'content': text, + } + } + + service = get_service() + + request = service.documents().analyzeSentiment(body=body) + response = request.execute() + + return response + + +def analyze_syntax(text, encoding='UTF32'): + body = { + 'document': { + 'type': 'PLAIN_TEXT', + 'content': text, + }, + 'features': { + 'extract_syntax': True, + }, + 'encodingType': encoding, + } + + service = get_service() + + request = service.documents().annotateText(body=body) + response = request.execute() + + return response + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument('command', choices=[ + 'entities', 'sentiment', 'syntax']) + parser.add_argument('text') + + args = parser.parse_args() + + if args.command == 'entities': + result = analyze_entities(args.text, get_native_encoding_type()) + elif args.command == 'sentiment': + result = analyze_sentiment(args.text) + elif args.command == 'syntax': + result = analyze_syntax(args.text, get_native_encoding_type()) + + print(json.dumps(result, indent=2)) diff --git a/language/snippets/api/analyze_test.py b/language/snippets/api/analyze_test.py new file mode 100644 index 000000000000..11b0d65d6299 --- /dev/null +++ b/language/snippets/api/analyze_test.py @@ -0,0 +1,258 @@ +# Copyright 2016, Google, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +import analyze + + +def test_analyze_entities(): + result = analyze.analyze_entities( + 'Tom Sawyer is a book written by a guy known as Mark Twain.') + + assert result['language'] == 'en' + entities = result['entities'] + assert len(entities) + subject = entities[0] + assert subject['type'] == 'PERSON' + assert subject['name'].startswith('Tom') + + +def test_analyze_sentiment(capsys): + result = analyze.analyze_sentiment( + 'your face is really ugly and i hate it.') + + sentiment = result['documentSentiment'] + assert sentiment['polarity'] < 0 + assert sentiment['magnitude'] < 1 + + result = analyze.analyze_sentiment( + 'cheerio, mate - I greatly admire the pallor of your visage, and your ' + 'angle of repose leaves little room for improvement.') + + sentiment = result['documentSentiment'] + assert sentiment['polarity'] > 0 + assert sentiment['magnitude'] < 1 + + +def test_analyze_syntax(capsys): + result = analyze.analyze_syntax(textwrap.dedent(u'''\ + Keep away from people who try to belittle your ambitions. Small people + always do that, but the really great make you feel that you, too, can + become great. + - Mark Twain''')) + + assert len(result['tokens']) + first_token = result['tokens'][0] + assert first_token['text']['content'] == 'Keep' + assert first_token['partOfSpeech']['tag'] == 'VERB' + assert len(result['sentences']) > 1 + assert result['language'] == 'en' + + +def test_analyze_syntax_utf8(): + """Demonstrate the interpretation of the offsets when encoding=utf8. + + UTF8 is a variable-length encoding, where each character is at least 8 + bits. The offsets we get should be the index of the first byte of the + character. + """ + test_string = u'a \u00e3 \u0201 \U0001f636 b' + byte_array = test_string.encode('utf8') + result = analyze.analyze_syntax(test_string, encoding='UTF8') + tokens = result['tokens'] + + assert tokens[0]['text']['content'] == 'a' + offset = tokens[0]['text'].get('beginOffset', 0) + assert (byte_array[offset:offset+1].decode('utf8') == + tokens[0]['text']['content']) + + assert tokens[1]['text']['content'] == u'\u00e3' + offset = tokens[1]['text'].get('beginOffset', 0) + assert (byte_array[offset:offset+2].decode('utf8') == + tokens[1]['text']['content']) + + assert tokens[2]['text']['content'] == u'\u0201' + offset = tokens[2]['text'].get('beginOffset', 0) + assert (byte_array[offset:offset+2].decode('utf8') == + tokens[2]['text']['content']) + + assert tokens[3]['text']['content'] == u'\U0001f636' + offset = tokens[3]['text'].get('beginOffset', 0) + assert (byte_array[offset:offset+4].decode('utf8') == + tokens[3]['text']['content']) + + # This demonstrates that the offset takes into account the variable-length + # characters before the target token. + assert tokens[4]['text']['content'] == u'b' + offset = tokens[4]['text'].get('beginOffset', 0) + # 'b' is only one byte long + assert (byte_array[offset:offset+1].decode('utf8') == + tokens[4]['text']['content']) + + +def test_analyze_syntax_utf16(): + """Demonstrate the interpretation of the offsets when encoding=utf16. + + UTF16 is a variable-length encoding, where each character is at least 16 + bits. The returned offsets will be the index of the first 2-byte character + of the token. 
+ """ + test_string = u'a \u00e3 \u0201 \U0001f636 b' + byte_array = test_string.encode('utf16') + # Remove the byte order marker, which the offsets don't account for + byte_array = byte_array[2:] + result = analyze.analyze_syntax(test_string, encoding='UTF16') + tokens = result['tokens'] + + assert tokens[0]['text']['content'] == 'a' + # The offset is an offset into an array where each entry is 16 bits. Since + # we have an 8-bit array, the offsets should be doubled to index into our + # array. + offset = 2 * tokens[0]['text'].get('beginOffset', 0) + assert (byte_array[offset:offset + 2].decode('utf16') == + tokens[0]['text']['content']) + + assert tokens[1]['text']['content'] == u'\u00e3' + offset = 2 * tokens[1]['text'].get('beginOffset', 0) + # A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so + # slice out 2 bytes starting from the offset. Then interpret the bytes as + # utf16 for comparison. + assert (byte_array[offset:offset + 2].decode('utf16') == + tokens[1]['text']['content']) + + assert tokens[2]['text']['content'] == u'\u0201' + offset = 2 * tokens[2]['text'].get('beginOffset', 0) + # A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so + # slice out 2 bytes starting from the offset. Then interpret the bytes as + # utf16 for comparison. + assert (byte_array[offset:offset + 2].decode('utf16') == + tokens[2]['text']['content']) + + assert tokens[3]['text']['content'] == u'\U0001f636' + offset = 2 * tokens[3]['text'].get('beginOffset', 0) + # A UTF16 character with a high codepoint is 32 bits (4 bytes) long, so + # slice out 4 bytes starting from the offset. Then interpret those bytes as + # utf16 for comparison. + assert (byte_array[offset:offset + 4].decode('utf16') == + tokens[3]['text']['content']) + + # This demonstrates that the offset takes into account the variable-length + # characters before the target token. + assert tokens[4]['text']['content'] == u'b' + offset = 2 * tokens[4]['text'].get('beginOffset', 0) + # Even though 'b' is only one byte long, utf16 still encodes it using 16 + # bits + assert (byte_array[offset:offset + 2].decode('utf16') == + tokens[4]['text']['content']) + + +def test_annotate_text_utf32(): + """Demonstrate the interpretation of the offsets when encoding=utf32. + + UTF32 is a fixed-length encoding, where each character is exactly 32 bits. + The returned offsets will be the index of the first 4-byte character + of the token. + + Python unicode objects index by the interpreted unicode character. This + means a given unicode character only ever takes up one slot in a unicode + string. This is equivalent to indexing into a UTF32 string, where all + characters are a fixed length and thus will only ever take up one slot. + + Thus, if you're indexing into a python unicode object, you can set + encoding to UTF32 to index directly into the unicode object (as opposed to + the byte arrays, as these examples do). + + Nonetheless, this test still demonstrates indexing into the byte array, for + consistency. Note that you could just index into the origin test_string + unicode object with the raw offset returned by the api (ie without + multiplying it by 4, as it is below). 
+ """ + test_string = u'a \u00e3 \u0201 \U0001f636 b' + byte_array = test_string.encode('utf32') + # Remove the byte order marker, which the offsets don't account for + byte_array = byte_array[4:] + result = analyze.analyze_syntax(test_string, encoding='UTF32') + tokens = result['tokens'] + + assert tokens[0]['text']['content'] == 'a' + # The offset is an offset into an array where each entry is 32 bits. Since + # we have an 8-bit array, the offsets should be quadrupled to index into + # our array. + offset = 4 * tokens[0]['text'].get('beginOffset', 0) + assert (byte_array[offset:offset + 4].decode('utf32') == + tokens[0]['text']['content']) + + assert tokens[1]['text']['content'] == u'\u00e3' + offset = 4 * tokens[1]['text'].get('beginOffset', 0) + # A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so + # slice out 4 bytes starting from the offset. Then interpret the bytes as + # utf32 for comparison. + assert (byte_array[offset:offset + 4].decode('utf32') == + tokens[1]['text']['content']) + + assert tokens[2]['text']['content'] == u'\u0201' + offset = 4 * tokens[2]['text'].get('beginOffset', 0) + # A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so + # slice out 4 bytes starting from the offset. Then interpret the bytes as + # utf32 for comparison. + assert (byte_array[offset:offset + 4].decode('utf32') == + tokens[2]['text']['content']) + + assert tokens[3]['text']['content'] == u'\U0001f636' + offset = 4 * tokens[3]['text'].get('beginOffset', 0) + # A UTF32 character with a high codepoint is 32 bits (4 bytes) long, so + # slice out 4 bytes starting from the offset. Then interpret those bytes as + # utf32 for comparison. + assert (byte_array[offset:offset + 4].decode('utf32') == + tokens[3]['text']['content']) + + # This demonstrates that the offset takes into account the variable-length + # characters before the target token. + assert tokens[4]['text']['content'] == u'b' + offset = 4 * tokens[4]['text'].get('beginOffset', 0) + # Even though 'b' is only one byte long, utf32 still encodes it using 32 + # bits + assert (byte_array[offset:offset + 4].decode('utf32') == + tokens[4]['text']['content']) + + +def test_annotate_text_utf32_directly_index_into_unicode(): + """Demonstrate using offsets directly, using encoding=utf32. + + See the explanation for test_annotate_text_utf32. Essentially, indexing + into a utf32 array is equivalent to indexing into a python unicode object. 
+ """ + test_string = u'a \u00e3 \u0201 \U0001f636 b' + result = analyze.analyze_syntax(test_string, encoding='UTF32') + tokens = result['tokens'] + + assert tokens[0]['text']['content'] == 'a' + offset = tokens[0]['text'].get('beginOffset', 0) + assert test_string[offset] == tokens[0]['text']['content'] + + assert tokens[1]['text']['content'] == u'\u00e3' + offset = tokens[1]['text'].get('beginOffset', 0) + assert test_string[offset] == tokens[1]['text']['content'] + + assert tokens[2]['text']['content'] == u'\u0201' + offset = tokens[2]['text'].get('beginOffset', 0) + assert test_string[offset] == tokens[2]['text']['content'] + + assert tokens[3]['text']['content'] == u'\U0001f636' + offset = tokens[3]['text'].get('beginOffset', 0) + assert test_string[offset] == tokens[3]['text']['content'] + + assert tokens[4]['text']['content'] == u'b' + offset = tokens[4]['text'].get('beginOffset', 0) + assert test_string[offset] == tokens[4]['text']['content'] diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt new file mode 100644 index 000000000000..0b96c82ee4c2 --- /dev/null +++ b/language/snippets/api/requirements.txt @@ -0,0 +1 @@ +google-api-python-client==1.5.1 diff --git a/language/snippets/movie_nl/README.md b/language/snippets/movie_nl/README.md new file mode 100644 index 000000000000..687a6c4058ab --- /dev/null +++ b/language/snippets/movie_nl/README.md @@ -0,0 +1,152 @@ +# Introduction +This sample is an application of the Google Cloud Platform Natural Language API. +It uses the [imdb movie reviews data set](https://www.cs.cornell.edu/people/pabo/movie-review-data/) +from [Cornell University](http://www.cs.cornell.edu/) and performs sentiment & entity +analysis on it. It combines the capabilities of sentiment analysis and entity recognition +to come up with actors/directors who are the most and least popular. + +### Set Up to Authenticate With Your Project's Credentials + +Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) +steps in the Quickstart doc to create a project and enable the +Cloud Natural Language API. Following those steps, make sure that you +[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), +and export the following environment variable: + +``` +export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json +``` + +**Note:** If you get an error saying your API hasn't been enabled, make sure +that you have correctly set this environment variable, and that the project that +you got the service account from has the Natural Language API enabled. + +## How it works +This sample uses the Natural Language API to annotate the input text. The +movie review document is broken into sentences using the `extract_syntax` feature. +Each sentence is sent to the API for sentiment analysis. The positive and negative +sentiment values are combined to come up with a single overall sentiment of the +movie document. + +In addition to the sentiment, the program also extracts the entities of type +`PERSON`, who are the actors in the movie (including the director and anyone +important). These entities are assigned the sentiment value of the document to +come up with the most and least popular actors/directors. + +### Movie document +We define a movie document as a set of reviews. These reviews are individual +sentences and we use the NL API to extract the sentences from the document. 
See +an example movie document below. + +``` + Sample review sentence 1. Sample review sentence 2. Sample review sentence 3. +``` + +### Sentences and Sentiment +Each sentence from the above document is assigned a sentiment as below. + +``` + Sample review sentence 1 => Sentiment 1 + Sample review sentence 2 => Sentiment 2 + Sample review sentence 3 => Sentiment 3 +``` + +### Sentiment computation +The final sentiment is computed by simply adding the sentence sentiments. + +``` + Total Sentiment = Sentiment 1 + Sentiment 2 + Sentiment 3 +``` + + +### Entity extraction and Sentiment assignment +Entities with type `PERSON` are extracted from the movie document using the NL +API. Since these entities are mentioned in their respective movie document, +they are associated with the document sentiment. + +``` + Document 1 => Sentiment 1 + + Person 1 + Person 2 + Person 3 + + Document 2 => Sentiment 2 + + Person 2 + Person 4 + Person 5 +``` + +Based on the above data we can calculate the sentiment associated with Person 2: + +``` + Person 2 => (Sentiment 1 + Sentiment 2) +``` + +## Movie Data Set +We have used the Cornell Movie Review data as our input. Please follow the instructions below to download and extract the data. + +### Download Instructions + +``` + $ curl -O http://www.cs.cornell.edu/people/pabo/movie-review-data/mix20_rand700_tokens.zip + $ unzip mix20_rand700_tokens.zip +``` + +## Command Line Usage +In order to use the movie analyzer, follow the instructions below. (Note that the `--sample` parameter below runs the script on +fewer documents, and can be omitted to run it on the entire corpus) + +### Install Dependencies + +Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. + +Then, install dependencies by running the following pip command: + +``` +$ pip install -r requirements.txt +``` +### How to Run + +``` +$ python main.py analyze --inp "tokens/*/*" \ + --sout sentiment.json \ + --eout entity.json \ + --sample 5 +``` + +You should see the log file `movie.log` created. + +## Output Data +The program produces sentiment and entity output in json format. For example: + +### Sentiment Output +``` + { + "doc_id": "cv310_tok-16557.txt", + "sentiment": 3.099, + "label": -1 + } +``` + +### Entity Output + +``` + { + "name": "Sean Patrick Flanery", + "wiki_url": "http://en.wikipedia.org/wiki/Sean_Patrick_Flanery", + "sentiment": 3.099 + } +``` + +### Entity Output Sorting +In order to sort and rank the entities generated, use the same `main.py` script. For example, +this will print the top 5 actors with negative sentiment: + +``` +$ python main.py rank --entity_input entity.json \ + --sentiment neg \ + --reverse True \ + --sample 5 +``` diff --git a/language/snippets/movie_nl/main.py b/language/snippets/movie_nl/main.py new file mode 100644 index 000000000000..ba5c63b60b98 --- /dev/null +++ b/language/snippets/movie_nl/main.py @@ -0,0 +1,383 @@ +# Copyright 2016 Google, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import codecs +import glob +import json +import logging +import os + +from googleapiclient import discovery +from googleapiclient.errors import HttpError +from oauth2client.client import GoogleCredentials +import requests + + +def analyze_document(service, document): + """Analyze the document and get the distribution of sentiments and + the movie name.""" + logging.info('Analyzing {}'.format(document.doc_id)) + + sentences, entities = document.extract_all_sentences(service) + + sentiments = [get_sentiment(service, sentence) for sentence in sentences] + + return sentiments, entities + + +def get_request_body(text, syntax=True, entities=True, sentiment=True): + """Creates the body of the request to the language api in + order to get an appropriate api response.""" + body = { + 'document': { + 'type': 'PLAIN_TEXT', + 'content': text, + }, + 'features': { + 'extract_syntax': syntax, + 'extract_entities': entities, + 'extract_document_sentiment': sentiment, + }, + 'encoding_type': 'UTF32' + } + + return body + + +def get_sentiment(service, sentence): + """Get the sentence-level sentiment.""" + body = get_request_body( + sentence, syntax=False, entities=True, sentiment=True) + + docs = service.documents() + request = docs.annotateText(body=body) + + response = request.execute(num_retries=3) + + sentiment = response.get('documentSentiment') + + if sentiment is None: + return (None, None) + else: + pol = sentiment.get('polarity') + mag = sentiment.get('magnitude') + + if pol is None and mag is not None: + pol = 0 + return (pol, mag) + + +class Document(object): + """Document class captures a single document of movie reviews.""" + + def __init__(self, text, doc_id, doc_path): + self.text = text + self.doc_id = doc_id + self.doc_path = doc_path + self.sentence_entity_pair = None + self.label = None + + def extract_all_sentences(self, service): + """Extract the sentences in a document.""" + + if self.sentence_entity_pair is not None: + return self.sentence_entity_pair + + docs = service.documents() + request_body = get_request_body( + self.text, + syntax=True, + entities=True, + sentiment=False) + request = docs.annotateText(body=request_body) + + ent_list = [] + + response = request.execute() + entities = response.get('entities', []) + sentences = response.get('sentences', []) + + sent_list = [ + sentence.get('text', {}).get('content') for sentence in sentences + ] + + for entity in entities: + ent_type = entity.get('type') + wiki_url = entity.get('metadata', {}).get('wikipedia_url') + + if ent_type == 'PERSON' and wiki_url is not None: + ent_list.append(wiki_url) + + self.sentence_entity_pair = (sent_list, ent_list) + + return self.sentence_entity_pair + + +def to_sentiment_json(doc_id, sent, label): + """Convert the sentiment info to json. + + Args: + doc_id: Document id + sent: Overall Sentiment for the document + label: Actual label +1, 0, -1 for the document + + Returns: + String json representation of the input + + """ + json_doc = {} + + json_doc['doc_id'] = doc_id + json_doc['sentiment'] = float('%.3f' % sent) + json_doc['label'] = label + + return json.dumps(json_doc) + + +def get_wiki_title(wiki_url): + """Get the wikipedia page title for a given wikipedia URL. 
+ + Args: + wiki_url: Wikipedia URL e.g., http://en.wikipedia.org/wiki/Sean_Connery + + Returns: + Wikipedia canonical name e.g., Sean Connery + + """ + try: + content = requests.get(wiki_url).text + return content.split('title')[1].split('-')[0].split('>')[1].strip() + except: + return os.path.basename(wiki_url).replace('_', ' ') + + +def to_entity_json(entity, entity_sentiment, entity_frequency): + """Convert entities and their associated sentiment to json. + + Args: + entity: Wikipedia entity name + entity_sentiment: Sentiment associated with the entity + entity_frequency: Frequency of the entity in the corpus + + Returns: + Json string representation of input + + """ + json_doc = {} + + avg_sentiment = float(entity_sentiment) / float(entity_frequency) + + json_doc['wiki_url'] = entity + json_doc['name'] = get_wiki_title(entity) + json_doc['sentiment'] = float('%.3f' % entity_sentiment) + json_doc['avg_sentiment'] = float('%.3f' % avg_sentiment) + + return json.dumps(json_doc) + + +def get_sentiment_entities(service, document): + """Compute the overall sentiment volume in the document. + + Args: + service: Client to Google Natural Language API + document: Movie review document (See Document object) + + Returns: + Tuple of total sentiment and entities found in the document + + """ + + sentiments, entities = analyze_document(service, document) + + sentiments = [sent for sent in sentiments if sent[0] is not None] + negative_sentiments = [ + polarity for polarity, magnitude in sentiments if polarity < 0.0] + positive_sentiments = [ + polarity for polarity, magnitude in sentiments if polarity > 0.0] + + negative = sum(negative_sentiments) + positive = sum(positive_sentiments) + total = positive + negative + + return (total, entities) + + +def get_sentiment_label(sentiment): + """Return the sentiment label based on the sentiment quantity.""" + if sentiment < 0: + return -1 + elif sentiment > 0: + return 1 + else: + return 0 + + +def process_movie_reviews(service, reader, sentiment_writer, entity_writer): + """Perform some sentiment math and come up with movie review.""" + collected_entities = {} + + for document in reader: + try: + sentiment_total, entities = get_sentiment_entities( + service, document) + except HttpError as e: + logging.error('Error process_movie_reviews {}'.format(e.content)) + continue + + document.label = get_sentiment_label(sentiment_total) + + sentiment_writer.write( + to_sentiment_json( + document.doc_id, + sentiment_total, + document.label + ) + ) + + sentiment_writer.write('\n') + + for ent in entities: + ent_sent, frequency = collected_entities.get(ent, (0, 0)) + ent_sent += sentiment_total + frequency += 1 + + collected_entities[ent] = (ent_sent, frequency) + + for entity, sentiment_frequency in collected_entities.items(): + entity_writer.write(to_entity_json(entity, sentiment_frequency[0], + sentiment_frequency[1])) + entity_writer.write('\n') + + sentiment_writer.flush() + entity_writer.flush() + + +def document_generator(dir_path_pattern, count=None): + """Generator for the input movie documents. 
+ + Args: + dir_path_pattern: Input dir pattern e.g., "foo/bar/*/*" + count: Number of documents to read else everything if None + + Returns: + Generator which contains Document (See above) + + """ + for running_count, item in enumerate(glob.iglob(dir_path_pattern)): + if count and running_count >= count: + raise StopIteration() + + doc_id = os.path.basename(item) + + with codecs.open(item, encoding='utf-8') as f: + try: + text = f.read() + except UnicodeDecodeError: + continue + + yield Document(text, doc_id, item) + + +def rank_entities(reader, sentiment=None, topn=None, reverse_bool=False): + """Rank the entities (actors) based on their sentiment + assigned from the movie.""" + + items = [] + for item in reader: + json_item = json.loads(item) + sent = json_item.get('sentiment') + entity_item = (sent, json_item) + + if sentiment: + if sentiment == 'pos' and sent > 0: + items.append(entity_item) + elif sentiment == 'neg' and sent < 0: + items.append(entity_item) + else: + items.append(entity_item) + + items.sort(reverse=reverse_bool) + items = [json.dumps(item[1]) for item in items] + + print('\n'.join(items[:topn])) + + +def get_service(): + """Build a client to the Google Cloud Natural Language API.""" + + credentials = GoogleCredentials.get_application_default() + + return discovery.build('language', 'v1beta1', + credentials=credentials) + + +def analyze(input_dir, sentiment_writer, entity_writer, sample, log_file): + """Analyze the document for sentiment and entities""" + + # Create logger settings + logging.basicConfig(filename=log_file, level=logging.DEBUG) + + # Create a Google Service object + service = get_service() + + reader = document_generator(input_dir, sample) + + # Process the movie documents + process_movie_reviews(service, reader, sentiment_writer, entity_writer) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + + subparsers = parser.add_subparsers(dest='command') + + rank_parser = subparsers.add_parser('rank') + + rank_parser.add_argument( + '--entity_input', help='location of entity input') + rank_parser.add_argument( + '--sentiment', help='filter sentiment as "neg" or "pos"') + rank_parser.add_argument( + '--reverse', help='reverse the order of the items', type=bool, + default=False + ) + rank_parser.add_argument( + '--sample', help='number of top items to process', type=int, + default=None + ) + + analyze_parser = subparsers.add_parser('analyze') + + analyze_parser.add_argument( + '--inp', help='location of the input', required=True) + analyze_parser.add_argument( + '--sout', help='location of the sentiment output', required=True) + analyze_parser.add_argument( + '--eout', help='location of the entity output', required=True) + analyze_parser.add_argument( + '--sample', help='number of top items to process', type=int) + analyze_parser.add_argument('--log_file', default='movie.log') + + args = parser.parse_args() + + if args.command == 'analyze': + with open(args.sout, 'w') as sout, open(args.eout, 'w') as eout: + analyze(args.inp, sout, eout, args.sample, args.log_file) + elif args.command == 'rank': + with open(args.entity_input, 'r') as entity_input: + rank_entities( + entity_input, args.sentiment, args.sample, args.reverse) diff --git a/language/snippets/movie_nl/main_test.py b/language/snippets/movie_nl/main_test.py new file mode 100644 index 000000000000..fc69e9bccfea --- /dev/null +++ b/language/snippets/movie_nl/main_test.py @@ -0,0 +1,128 @@ +# Copyright 2016 
Google, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import main +import six + + +def test_get_request_body(): + text = 'hello world' + body = main.get_request_body(text, syntax=True, entities=True, + sentiment=False) + assert body.get('document').get('content') == text + + assert body.get('features').get('extract_syntax') is True + assert body.get('features').get('extract_entities') is True + assert body.get('features').get('extract_document_sentiment') is False + + +def test_get_sentiment_label(): + assert main.get_sentiment_label(20.50) == 1 + assert main.get_sentiment_label(-42.34) == -1 + + +def test_to_sentiment_json(): + doc_id = '12345' + sentiment = 23.344564 + label = 1 + + sentiment_json = json.loads( + main.to_sentiment_json(doc_id, sentiment, label) + ) + + assert sentiment_json.get('doc_id') == doc_id + assert sentiment_json.get('sentiment') == 23.345 + assert sentiment_json.get('label') == label + + +def test_process_movie_reviews(): + service = main.get_service() + + doc1 = main.Document('Top Gun was awesome and Tom Cruise rocked!', 'doc1', + 'doc1') + doc2 = main.Document('Tom Cruise is a great actor.', 'doc2', 'doc2') + + reader = [doc1, doc2] + swriter = six.StringIO() + ewriter = six.StringIO() + + main.process_movie_reviews(service, reader, swriter, ewriter) + + sentiments = swriter.getvalue().strip().split('\n') + entities = ewriter.getvalue().strip().split('\n') + + sentiments = [json.loads(sentiment) for sentiment in sentiments] + entities = [json.loads(entity) for entity in entities] + + # assert sentiments + assert sentiments[0].get('sentiment') == 1.0 + assert sentiments[0].get('label') == 1 + + assert sentiments[1].get('sentiment') == 1.0 + assert sentiments[1].get('label') == 1 + + # assert entities + assert len(entities) == 1 + assert entities[0].get('name') == 'Tom Cruise' + assert (entities[0].get('wiki_url') == + 'http://en.wikipedia.org/wiki/Tom_Cruise') + assert entities[0].get('sentiment') == 2.0 + + +def test_rank_positive_entities(capsys): + reader = [ + ('{"avg_sentiment": -12.0, ' + '"name": "Patrick Macnee", "sentiment": -12.0}'), + ('{"avg_sentiment": 5.0, ' + '"name": "Paul Rudd", "sentiment": 5.0}'), + ('{"avg_sentiment": -5.0, ' + '"name": "Martha Plimpton", "sentiment": -5.0}'), + ('{"avg_sentiment": 7.0, ' + '"name": "Lucy (2014 film)", "sentiment": 7.0}') + ] + + main.rank_entities(reader, 'pos', topn=1, reverse_bool=False) + out, err = capsys.readouterr() + + expected = ('{"avg_sentiment": 5.0, ' + '"name": "Paul Rudd", "sentiment": 5.0}') + + expected = ''.join(sorted(expected)) + out = ''.join(sorted(out.strip())) + assert out == expected + + +def test_rank_negative_entities(capsys): + reader = [ + ('{"avg_sentiment": -12.0, ' + '"name": "Patrick Macnee", "sentiment": -12.0}'), + ('{"avg_sentiment": 5.0, ' + '"name": "Paul Rudd", "sentiment": 5.0}'), + ('{"avg_sentiment": -5.0, ' + '"name": "Martha Plimpton", "sentiment": -5.0}'), + ('{"avg_sentiment": 7.0, ' + '"name": "Lucy (2014 film)", "sentiment": 7.0}') + ] + + 
main.rank_entities(reader, 'neg', topn=1, reverse_bool=True) + out, err = capsys.readouterr() + + expected = ('{"avg_sentiment": -5.0, ' + '"name": "Martha Plimpton", "sentiment": -5.0}') + + expected = ''.join(sorted(expected)) + out = ''.join(sorted(out.strip())) + assert out == expected diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt new file mode 100644 index 000000000000..c385fb4e4e03 --- /dev/null +++ b/language/snippets/movie_nl/requirements.txt @@ -0,0 +1,2 @@ +google-api-python-client==1.5.1 +requests==2.10.0 diff --git a/language/snippets/ocr_nl/README.md b/language/snippets/ocr_nl/README.md new file mode 100644 index 000000000000..189e93979010 --- /dev/null +++ b/language/snippets/ocr_nl/README.md @@ -0,0 +1,227 @@ + +# Using the Cloud Natural Language API to analyze image text found with Cloud Vision + +This example uses the [Cloud Vision API](https://cloud.google.com/vision/) to +detect text in images, then analyzes that text using the [Cloud NL (Natural +Language) API](https://cloud.google.com/natural-language/) to detect +[entities](https://cloud.google.com/natural-language/docs/basics#entity_analysis) +in the text. It stores the detected entity +information in an [sqlite3](https://www.sqlite.org) database, which may then be +queried. + +(This kind of analysis can be useful with scans of brochures and fliers, +invoices, and other types of company documents... or maybe just organizing your +memes). + +After the example script has analyzed a directory of images, it outputs some +information on the images' entities to STDOUT. You can also further query +the generated sqlite3 database. + +## Setup + +### Install sqlite3 as necessary + +The example requires that sqlite3 be installed. Most likely, sqlite3 is already +installed for you on your machine, but if not, you can find it +[here](https://www.sqlite.org/download.html). + +### Set Up to Authenticate With Your Project's Credentials + +* Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) +steps in the Quickstart doc to create a project and enable the +Cloud Natural Language API. +* Following those steps, make sure that you [Set Up a Service + Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), + and export the following environment variable: + + ``` + export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json + ``` +* This sample also requires that you [enable the Cloud Vision + API](https://console.cloud.google.com/apis/api/vision.googleapis.com/overview?project=_) + +## Running the example + +Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. + +To run the example, install the necessary libraries using pip: + +```sh +$ pip install -r requirements.txt +``` + +You must also be set up to authenticate with the Cloud APIs using your +project's service account credentials, as described above. + +Then, run the script on a directory of images to do the analysis, E.g.: + +```sh +$ python main.py --input_directory= +``` + +You can try this on a sample directory of images: + +```sh +$ curl -O http://storage.googleapis.com/python-docs-samples-tests/language/ocr_nl-images.zip +$ unzip ocr_nl-images.zip +$ python main.py --input_directory=images/ +``` + +## A walkthrough of the example and its results + +Let's take a look at what the example generates when run on the `images/` +sample directory, and how it does it. 
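+
+At the heart of the example is a single Natural Language API call. As a rough,
+stripped-down sketch (assuming `GOOGLE_APPLICATION_CREDENTIALS` is exported as
+described above; the real script adds batching, error handling, and storage),
+the entity detection boils down to:
+
+```python
+from googleapiclient import discovery
+from oauth2client.client import GoogleCredentials
+
+credentials = GoogleCredentials.get_application_default()
+service = discovery.build('language', 'v1beta1', credentials=credentials)
+
+body = {
+    'document': {
+        'type': 'PLAIN_TEXT',
+        'content': 'Holmes and Watson walked over to the cafe.',
+    },
+    # UTF32 matches Python's native string offsets on wide builds.
+    'encodingType': 'UTF32',
+}
+response = service.documents().analyzeEntities(body=body).execute()
+for entity in response.get('entities', []):
+    print(entity['name'], entity.get('metadata', {}).get('wikipedia_url'))
+```
+
+The rest of this walkthrough looks at how the full script builds on this call.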
+
+The script looks at each image file in the given directory, and uses the Vision
+API's text detection capabilities (OCR) to find any text in each image. It
+passes that info to the NL API, and asks it to detect
+[entities](https://cloud.google.com/natural-language/docs/basics#entity_analysis)
+in the discovered text, then stores this information in a queryable database.
+
+To keep things simple, we're just passing to the NL API all the text found in a
+given image, in one string. Note that sometimes this string can include
+misinterpreted characters (if the image text was not very clear), or list words
+"out of order" from how a human would interpret them. So, the text that is
+actually passed to the NL API might not be quite what you would have predicted
+with your human eyeballs.
+
+The Entity information returned by the NL API includes *type*, *name*, *salience*,
+information about where in the text the given entity was found, and detected
+language. It may also include *metadata*, including a link to a Wikipedia URL
+that the NL API believes this entity maps to. See the
+[documentation](https://cloud.google.com/natural-language/docs/) and the [API
+reference pages](https://cloud.google.com/natural-language/reference/rest/v1beta1/Entity)
+for more information about `Entity` fields.
+
+For example, if the NL API was given the sentence:
+
+```
+"Holmes and Watson walked over to the cafe."
+```
+
+it would return a response something like the following:
+
+```
+{
+  "entities": [{
+    "salience": 0.51629782,
+    "mentions": [{
+      "text": {
+        "content": "Holmes",
+        "beginOffset": 0
+      }}],
+    "type": "PERSON",
+    "name": "Holmes",
+    "metadata": {
+      "wikipedia_url": "http://en.wikipedia.org/wiki/Sherlock_Holmes"
+    }},
+    {
+    "salience": 0.22334209,
+    "mentions": [{
+      "text": {
+        "content": "Watson",
+        "beginOffset": 11
+      }}],
+    "type": "PERSON",
+    "name": "Watson",
+    "metadata": {
+      "wikipedia_url": "http://en.wikipedia.org/wiki/Dr._Watson"
+    }}],
+  "language": "en"
+}
+```
+
+Note that the NL API determined from context that "Holmes" was referring to
+'Sherlock Holmes', even though the name "Sherlock" was not included.
+
+Note also that not all nouns in a given sentence are detected as Entities. An
+Entity represents a phrase in the text that is a known entity, such as a person,
+an organization, or location. The generic mention of a 'cafe' is not treated as
+an entity in this sense.
+
+For each image file, we store its detected entity information (if any) in an
+sqlite3 database.
+
+### Querying for information about the detected entities
+
+Once the detected entity information from all the images is stored in the
+sqlite3 database, we can run some queries to do some interesting analysis. The
+script runs a couple of such example query sets and outputs the result to STDOUT.
+
+The first set of queries outputs information about the top 15 most frequent
+entity names found in the images, and the second outputs information about the
+top 15 most frequent Wikipedia URLs found.
+
+For example, with the sample image set, note that the name 'Sherlock Holmes' is
+found three times, but entities associated with the URL
+http://en.wikipedia.org/wiki/Sherlock_Holmes are found four times; one of the
+entity names was only "Holmes", but the NL API detected from context that it
+referred to Sherlock Holmes. Similarly, you can see that mentions of 'Hive' and
+'Spark' mapped correctly – given their context – to the URLs of those Apache
+products.
+ +``` +----entity: http://en.wikipedia.org/wiki/Apache_Hive was found with count 1 +Found in file images/IMG_20160621_133020.jpg, detected as type OTHER, with + locale en. +names(s): set([u'hive']) +salience measure(s): set([0.0023808887]) +``` + +Similarly, 'Elizabeth' (in screencaps of text from "Pride and Prejudice") is +correctly mapped to http://en.wikipedia.org/wiki/Elizabeth_Bennet because of the +context of the surrounding text. + +``` +----entity: http://en.wikipedia.org/wiki/Elizabeth_Bennet was found with count 2 +Found in file images/Screenshot 2016-06-19 11.51.50.png, detected as type PERSON, with + locale en. +Found in file images/Screenshot 2016-06-19 12.08.30.png, detected as type PERSON, with + locale en. +names(s): set([u'elizabeth']) +salience measure(s): set([0.34601286, 0.0016268975]) +``` + +## Further queries to the sqlite3 database + +When the script runs, it makes a couple of example queries to the database +containing the entity information returned from the NL API. You can make further +queries on that database by starting up sqlite3 from the command line, and +passing it the name of the database file generated by running the example. This +file will be in the same directory, and have `entities` as a prefix, with the +timestamp appended. (If you have run the example more than once, a new database +file will be created each time). + +Run sqlite3 as follows (using the name of your own database file): + +```sh +$ sqlite3 entities1466518508.db +``` + +You'll see something like this: + +``` +SQLite version 3.8.10.2 2015-05-20 18:17:19 +Enter ".help" for usage hints. +sqlite> +``` + +From this prompt, you can make any queries on the data that you want. E.g., +start with something like: + +``` +sqlite> select * from entities limit 20; +``` + +Or, try this to see in which images the most entities were detected: + +``` +sqlite> select filename, count(filename) from entities group by filename; +``` + +You can do more complex queries to get further information about the entities +that have been discovered in your images. E.g., you might want to investigate +which of the entities are most commonly found together in the same image. See +the [SQLite documentation](https://www.sqlite.org/docs.html) for more +information. + + diff --git a/language/snippets/ocr_nl/main.py b/language/snippets/ocr_nl/main.py new file mode 100755 index 000000000000..6e329f53386e --- /dev/null +++ b/language/snippets/ocr_nl/main.py @@ -0,0 +1,362 @@ +#!/usr/bin/env python +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This example uses the Google Cloud Vision API to detect text in images, then +analyzes that text using the Google Cloud Natural Language API to detect +entities in the text. It stores the detected entity information in an sqlite3 +database, which may then be queried. + +After this script has analyzed a directory of images, it outputs some +information on the images' entities to STDOUT. 
You can also further query +the generated sqlite3 database; see the README for more information. + +Run the script on a directory of images to do the analysis, E.g.: + $ python main.py --input_directory= + +You can try this on a sample directory of images: + $ curl -O http://storage.googleapis.com/python-docs-samples-tests/language/ocr_nl-images.zip + $ unzip ocr_nl-images.zip + $ python main.py --input_directory=images/ + +""" # noqa + +import argparse +import base64 +import contextlib +import logging +import os +import sqlite3 +import sys +import time + +from googleapiclient import discovery +from googleapiclient import errors +import httplib2 +from oauth2client.client import GoogleCredentials + +BATCH_SIZE = 10 + + +class VisionApi(object): + """Construct and use the Cloud Vision API service.""" + + def __init__(self): + credentials = GoogleCredentials.get_application_default() + self.service = discovery.build('vision', 'v1', credentials=credentials) + + def detect_text(self, input_filenames, num_retries=3, max_results=6): + """Uses the Vision API to detect text in the given file.""" + batch_request = [] + for filename in input_filenames: + request = { + 'image': {}, + 'features': [{ + 'type': 'TEXT_DETECTION', + 'maxResults': max_results, + }] + } + + # Accept both files in cloud storage, as well as local files. + if filename.startswith('gs://'): + request['image']['source'] = { + 'gcsImageUri': filename + } + else: + with open(filename, 'rb') as image_file: + request['image']['content'] = base64.b64encode( + image_file.read()).decode('UTF-8') + + batch_request.append(request) + + request = self.service.images().annotate( + body={'requests': batch_request}) + + try: + responses = request.execute(num_retries=num_retries) + if 'responses' not in responses: + return {} + + text_response = {} + for filename, response in zip( + input_filenames, responses['responses']): + + if 'error' in response: + logging.error('API Error for {}: {}'.format( + filename, + response['error'].get('message', ''))) + continue + + text_response[filename] = response.get('textAnnotations', []) + + return text_response + + except errors.HttpError as e: + logging.error('Http Error for {}: {}'.format(filename, e)) + except KeyError as e2: + logging.error('Key error: {}'.format(e2)) + + +class TextAnalyzer(object): + """Construct and use the Google Natural Language API service.""" + + def __init__(self, db_filename=None): + credentials = GoogleCredentials.get_application_default() + scoped_credentials = credentials.create_scoped( + ['https://www.googleapis.com/auth/cloud-platform']) + http = httplib2.Http() + scoped_credentials.authorize(http) + self.service = discovery.build('language', 'v1beta1', http=http) + + # This list will store the entity information gleaned from the + # image files. + self.entity_info = [] + + # This is the filename of the sqlite3 database to save to + self.db_filename = db_filename or 'entities{}.db'.format( + int(time.time())) + + def _get_native_encoding_type(self): + """Returns the encoding type that matches Python's native strings.""" + if sys.maxunicode == 65535: + return 'UTF16' + else: + return 'UTF32' + + def nl_detect(self, text): + """Use the Natural Language API to analyze the given text string.""" + # We're only requesting 'entity' information from the Natural Language + # API at this time. 
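+        # The same document payload works with the API's other methods too
+        # (e.g. analyzeSentiment, or annotateText as in the syntax_triples
+        # sample), so this helper is easy to extend.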
+ body = { + 'document': { + 'type': 'PLAIN_TEXT', + 'content': text, + }, + 'encodingType': self._get_native_encoding_type(), + } + entities = [] + try: + request = self.service.documents().analyzeEntities(body=body) + response = request.execute() + entities = response['entities'] + except errors.HttpError as e: + logging.error('Http Error: %s' % e) + except KeyError as e2: + logging.error('Key error: %s' % e2) + return entities + + def add_entities(self, filename, locale, document): + """Apply the Natural Language API to the document, and collect the + detected entities.""" + + # Apply the Natural Language API to the document. + entities = self.nl_detect(document) + self.extract_and_save_entity_info(entities, locale, filename) + + def extract_entity_info(self, entity): + """Extract information about an entity.""" + type = entity['type'] + name = entity['name'].lower() + metadata = entity['metadata'] + salience = entity['salience'] + wiki_url = metadata.get('wikipedia_url', None) + return (type, name, salience, wiki_url) + + def extract_and_save_entity_info(self, entities, locale, filename): + for entity in entities: + type, name, salience, wiki_url = self.extract_entity_info(entity) + # Because this is a small example, we're using a list to hold + # all the entity information, then we'll insert it into the + # database all at once when we've processed all the files. + # For a larger data set, you would want to write to the database + # in batches. + self.entity_info.append( + (locale, type, name, salience, wiki_url, filename)) + + def write_entity_info_to_db(self): + """Store the info gleaned about the entities in the text, via the + Natural Language API, in an sqlite3 database table, and then print out + some simple analytics. + """ + logging.info('Saving entity info to the sqlite3 database.') + # Create the db. + with contextlib.closing(sqlite3.connect(self.db_filename)) as conn: + with conn as cursor: + # Create table + cursor.execute( + 'CREATE TABLE if not exists entities (locale text, ' + 'type text, name text, salience real, wiki_url text, ' + 'filename text)') + with conn as cursor: + # Load all the data + cursor.executemany( + 'INSERT INTO entities VALUES (?,?,?,?,?,?)', + self.entity_info) + + def output_entity_data(self): + """Output some info about the entities by querying the generated + sqlite3 database. + """ + + with contextlib.closing(sqlite3.connect(self.db_filename)) as conn: + + # This query finds the number of times each entity name was + # detected, in descending order by count, and returns information + # about the first 15 names, including the files in which they were + # found, their detected 'salience' and language (locale), and the + # wikipedia urls (if any) associated with them. 
+ print('\n==============\nTop 15 most frequent entity names:') + + cursor = conn.cursor() + results = cursor.execute( + 'select name, count(name) as wc from entities ' + 'group by name order by wc desc limit 15;') + + for item in results: + cursor2 = conn.cursor() + print(u'\n----Name: {} was found with count {}'.format(*item)) + results2 = cursor2.execute( + 'SELECT name, type, filename, locale, wiki_url, salience ' + 'FROM entities WHERE name=?', (item[0],)) + urls = set() + for elt in results2: + print(('Found in file {}, detected as type {}, with\n' + ' locale {} and salience {}.').format( + elt[2], elt[1], elt[3], elt[5])) + if elt[4]: + urls.add(elt[4]) + if urls: + print('url(s): {}'.format(urls)) + + # This query finds the number of times each wikipedia url was + # detected, in descending order by count, and returns information + # about the first 15 urls, including the files in which they were + # found and the names and 'salience' with which they were + # associated. + print('\n==============\nTop 15 most frequent Wikipedia URLs:') + c = conn.cursor() + results = c.execute( + 'select wiki_url, count(wiki_url) as wc from entities ' + 'group by wiki_url order by wc desc limit 15;') + + for item in results: + cursor2 = conn.cursor() + print('\n----entity: {} was found with count {}'.format(*item)) + results2 = cursor2.execute( + 'SELECT name, type, filename, locale, salience ' + 'FROM entities WHERE wiki_url=?', (item[0],)) + names = set() + salience = set() + for elt in results2: + print(('Found in file {}, detected as type {}, with\n' + ' locale {}.').format(elt[2], elt[1], elt[3])) + names.add(elt[0]) + salience.add(elt[4]) + print('names(s): {}'.format(names)) + print('salience measure(s): {}'.format(salience)) + + +def extract_description(texts): + """Returns text annotations as a single string""" + document = [] + + for text in texts: + try: + document.append(text['description']) + locale = text['locale'] + # Process only the first entry, which contains all + # text detected. + break + except KeyError as e: + logging.error('KeyError: %s\n%s' % (e, text)) + return (locale, ' '.join(document)) + + +def extract_descriptions(input_filename, texts, text_analyzer): + """Gets the text that was detected in the image.""" + if texts: + locale, document = extract_description(texts) + text_analyzer.add_entities(input_filename, locale, document) + sys.stdout.write('.') # Output a progress indicator. + sys.stdout.flush() + elif texts == []: + print('%s had no discernible text.' % input_filename) + + +def get_text_from_files(vision, input_filenames, text_analyzer): + """Call the Vision API on a file and index the results.""" + texts = vision.detect_text(input_filenames) + if texts: + for filename, text in texts.items(): + extract_descriptions(filename, text, text_analyzer) + + +def batch(list_to_batch, batch_size=BATCH_SIZE): + """Group a list into batches of size batch_size. + + >>> tuple(batch([1, 2, 3, 4, 5], batch_size=2)) + ((1, 2), (3, 4), (5)) + """ + for i in range(0, len(list_to_batch), batch_size): + yield tuple(list_to_batch[i:i + batch_size]) + + +def main(input_dir, db_filename=None): + """Walk through all the image files in the given directory, extracting any + text from them and feeding that text to the Natural Language API for + analysis. 
+ """ + # Create a client object for the Vision API + vision_api_client = VisionApi() + # Create an object to analyze our text using the Natural Language API + text_analyzer = TextAnalyzer(db_filename) + + if input_dir: + allfileslist = [] + # Recursively construct a list of all the files in the given input + # directory. + for folder, subs, files in os.walk(input_dir): + for filename in files: + allfileslist.append(os.path.join(folder, filename)) + + # Analyze the text in the files using the Vision and Natural Language + # APIs. + for filenames in batch(allfileslist, batch_size=1): + get_text_from_files(vision_api_client, filenames, text_analyzer) + + # Save the result to a database, then run some queries on the database, + # with output to STDOUT. + text_analyzer.write_entity_info_to_db() + + # now, print some information about the entities detected. + text_analyzer.output_entity_data() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description='Detects text in the images in the given directory.') + parser.add_argument( + '--input_directory', + help='The image directory you\'d like to detect text in. If left ' + 'unspecified, the --db specified will be queried without being ' + 'updated.') + parser.add_argument( + '--db', help='The filename to use for the sqlite3 database.') + args = parser.parse_args() + + if not (args.input_directory or args.db): + parser.error('Either --input_directory or --db must be specified.') + + main(args.input_directory, args.db) diff --git a/language/snippets/ocr_nl/main_test.py b/language/snippets/ocr_nl/main_test.py new file mode 100755 index 000000000000..c07ed747ea0f --- /dev/null +++ b/language/snippets/ocr_nl/main_test.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for main.""" + +import re +import zipfile + +import main + + +_TEST_IMAGE_URI = 'gs://{}/language/image8.png' + + +def test_batch_empty(): + for batch_size in range(1, 10): + assert len( + list(main.batch([], batch_size=batch_size))) == 0 + + +def test_batch_single(): + for batch_size in range(1, 10): + batched = tuple(main.batch([1], batch_size=batch_size)) + assert batched == ((1,),) + + +def test_single_image_returns_text(cloud_config): + vision_api_client = main.VisionApi() + + image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket) + texts = vision_api_client.detect_text([image_path]) + + assert image_path in texts + _, document = main.extract_description(texts[image_path]) + assert "daughter" in document + assert "Bennet" in document + assert "hat" in document + + +def test_single_nonimage_returns_error(): + vision_api_client = main.VisionApi() + texts = vision_api_client.detect_text(['README.md']) + assert "README.md" not in texts + + +def test_text_returns_entities(): + text = "Holmes and Watson walked to the cafe." 
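+    # The NL API should find two PERSON entities here, resolving 'Holmes'
+    # to the Sherlock Holmes Wikipedia page (see the assertions below).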
+    text_analyzer = main.TextAnalyzer()
+    entities = text_analyzer.nl_detect(text)
+    assert len(entities) == 2
+    etype, ename, salience, wurl = text_analyzer.extract_entity_info(
+        entities[0])
+    assert ename == 'holmes'
+    assert wurl == 'http://en.wikipedia.org/wiki/Sherlock_Holmes'
+
+
+def test_entities_list(cloud_config):
+    vision_api_client = main.VisionApi()
+    image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket)
+    texts = vision_api_client.detect_text([image_path])
+    locale, document = main.extract_description(texts[image_path])
+    text_analyzer = main.TextAnalyzer()
+    entities = text_analyzer.nl_detect(document)
+    assert len(entities) == 4
+    etype, ename, salience, wurl = text_analyzer.extract_entity_info(
+        entities[0])
+    assert ename == 'bennet'
+    assert wurl == 'http://en.wikipedia.org/wiki/Mr_Bennet'
+
+
+def test_main(remote_resource, tmpdir, capsys):
+    images_path = str(tmpdir.mkdir('images'))
+
+    # First, pull down some test data
+    zip_path = remote_resource('language/ocr_nl-images-small.zip', tmpdir)
+
+    # Extract it to the image directory
+    with zipfile.ZipFile(zip_path) as zfile:
+        zfile.extractall(images_path)
+
+    main.main(images_path, str(tmpdir.join('ocr_nl.db')))
+
+    stdout, _ = capsys.readouterr()
+
+    assert re.search(r'google was found with count', stdout)
diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt
new file mode 100644
index 000000000000..0b96c82ee4c2
--- /dev/null
+++ b/language/snippets/ocr_nl/requirements.txt
@@ -0,0 +1 @@
+google-api-python-client==1.5.1
diff --git a/language/snippets/syntax_triples/README.md b/language/snippets/syntax_triples/README.md
new file mode 100644
index 000000000000..1342ee65289d
--- /dev/null
+++ b/language/snippets/syntax_triples/README.md
@@ -0,0 +1,91 @@
+# Using the Cloud Natural Language API to find subject-verb-object triples in text
+
+This example finds subject-verb-object triples in a given piece of text using
+the syntax analysis capabilities of the
+[Cloud Natural Language API](https://cloud.google.com/natural-language/).
+To do this, it calls the extractSyntax feature of the API
+and uses the dependency parse tree and part-of-speech tags in the response
+to build the subject-verb-object triples. The results are printed to STDOUT.
+This type of analysis can be considered the
+first step towards an information extraction task.
+
+## Set Up to Authenticate With Your Project's Credentials
+
+Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project)
+steps in the Quickstart doc to create a project and enable the
+Cloud Natural Language API. Following those steps, make sure that you
+[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account),
+and export the following environment variable:
+
+```
+export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json
+```
+
+## Running the example
+
+Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed.
+
+To run the example, install the necessary libraries using pip:
+
+```
+$ pip install -r requirements.txt
+```
+You must also be set up to authenticate with the Cloud APIs using your
+project's service account credentials, as described above.
+
+Then, run the script on a file containing the text that you wish to analyze.
+The text must be encoded in UTF8 or ASCII: + +``` +$ python main.py +``` + +Try this on a sample text in the resources directory: + +``` +$ python main.py resources/obama_wikipedia.txt +``` + +## A walkthrough of the example and its results + +Let's take a look at what the example generates when run on the +`obama_wikipedia.txt` sample file, and how it does it. + +The goal is to find all subject-verb-object +triples in the text. The example first sends the text to the Cloud Natural +Language API to perform extractSyntax analysis. Then, using part-of-speech tags, + it finds all the verbs in the text. For each verb, it uses the dependency +parse tree information to find all the dependent tokens. + +For example, given the following sentence in the `obama_wikipedia.txt` file: + +``` +"He began his presidential campaign in 2007" +``` +The example finds the verb `began`, and `He`, `campaign`, and `in` as its +dependencies. Then the script enumerates the dependencies for each verb and +finds all the subjects and objects. For the sentence above, the found subject +and object are `He` and `campaign`. + +The next step is to complete each subject and object token by adding their +dependencies to them. For example, in the sentence above, `his` and +`presidential` are dependent tokens for `campaign`. This is done using the +dependency parse tree, similar to verb dependencies as explained above. The +final result is (`He`, `began`, `his presidential campaign`) triple for +the example sentence above. + +The script performs this analysis for the entire text and prints the result. +For the `obama_wikipedia.txt` file, the result is the following: + +```sh ++------------------------------+------------+------------------------------+ +| Obama | received | national attention | ++------------------------------+------------+------------------------------+ +| He | began | his presidential campaign | ++------------------------------+------------+------------------------------+ +| he | won | sufficient delegates in the | +| | | Democratic Party primaries | ++------------------------------+------------+------------------------------+ +| He | defeated | Republican nominee John | +| | | McCain | +``` diff --git a/language/snippets/syntax_triples/main.py b/language/snippets/syntax_triples/main.py new file mode 100644 index 000000000000..1be174bff04c --- /dev/null +++ b/language/snippets/syntax_triples/main.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This example finds subject-verb-object triples in a given piece of text using +the syntax analysis capabilities of Cloud Natural Language API. The triples are +printed to STDOUT. This can be considered as the first step towards an +information extraction task. + +Run the script on a file containing the text that you wish to analyze. 
+The text must be encoded in UTF8 or ASCII: + $ python main.py + +Try this on a sample text in the resources directory: + $ python main.py resources/obama_wikipedia.txt +""" + +import argparse +import sys +import textwrap + +from googleapiclient import discovery +import httplib2 +from oauth2client.client import GoogleCredentials + + +def dependents(tokens, head_index): + """Returns an ordered list of the token indices of the dependents for + the given head.""" + # Create head->dependency index. + head_to_deps = {} + for i, token in enumerate(tokens): + head = token['dependencyEdge']['headTokenIndex'] + if i != head: + head_to_deps.setdefault(head, []).append(i) + return head_to_deps.get(head_index, ()) + + +def phrase_text_for_head(tokens, text, head_index): + """Returns the entire phrase containing the head token + and its dependents. + """ + begin, end = phrase_extent_for_head(tokens, head_index) + return text[begin:end] + + +def phrase_extent_for_head(tokens, head_index): + """Returns the begin and end offsets for the entire phrase + containing the head token and its dependents. + """ + begin = tokens[head_index]['text']['beginOffset'] + end = begin + len(tokens[head_index]['text']['content']) + for child in dependents(tokens, head_index): + child_begin, child_end = phrase_extent_for_head(tokens, child) + begin = min(begin, child_begin) + end = max(end, child_end) + return (begin, end) + + +def analyze_syntax(text): + """Use the NL API to analyze the given text string, and returns the + response from the API. Requests an encodingType that matches + the encoding used natively by Python. Raises an + errors.HTTPError if there is a connection problem. + """ + credentials = GoogleCredentials.get_application_default() + scoped_credentials = credentials.create_scoped( + ['https://www.googleapis.com/auth/cloud-platform']) + http = httplib2.Http() + scoped_credentials.authorize(http) + service = discovery.build( + 'language', 'v1beta1', http=http) + body = { + 'document': { + 'type': 'PLAIN_TEXT', + 'content': text, + }, + 'features': { + 'extract_syntax': True, + }, + 'encodingType': get_native_encoding_type(), + } + request = service.documents().annotateText(body=body) + return request.execute() + + +def get_native_encoding_type(): + """Returns the encoding type that matches Python's native strings.""" + if sys.maxunicode == 65535: + return 'UTF16' + else: + return 'UTF32' + + +def find_triples(tokens, + left_dependency_label='NSUBJ', + head_part_of_speech='VERB', + right_dependency_label='DOBJ'): + """Generator function that searches the given tokens + with the given part of speech tag, that have dependencies + with the given labels. For each such head found, yields a tuple + (left_dependent, head, right_dependent), where each element of the + tuple is an index into the tokens array. + """ + for head, token in enumerate(tokens): + if token['partOfSpeech']['tag'] == head_part_of_speech: + children = dependents(tokens, head) + left_deps = [] + right_deps = [] + for child in children: + child_token = tokens[child] + child_dep_label = child_token['dependencyEdge']['label'] + if child_dep_label == left_dependency_label: + left_deps.append(child) + elif child_dep_label == right_dependency_label: + right_deps.append(child) + for left_dep in left_deps: + for right_dep in right_deps: + yield (left_dep, head, right_dep) + + +def show_triple(tokens, text, triple): + """Prints the given triple (left, head, right). For left and right, + the entire phrase headed by each token is shown. 
For head, only
+    the head token itself is shown.
+
+    """
+    nsubj, verb, dobj = triple
+
+    # Extract the text for each element of the triple.
+    nsubj_text = phrase_text_for_head(tokens, text, nsubj)
+    verb_text = tokens[verb]['text']['content']
+    dobj_text = phrase_text_for_head(tokens, text, dobj)
+
+    # Pretty-print the triple.
+    left = textwrap.wrap(nsubj_text, width=28)
+    mid = textwrap.wrap(verb_text, width=10)
+    right = textwrap.wrap(dobj_text, width=28)
+    print('+' + 30 * '-' + '+' + 12 * '-' + '+' + 30 * '-' + '+')
+    # zip() stops at the shortest column, which would drop the continuation
+    # rows of any phrase that wraps; pad the columns to equal height first.
+    height = max(len(left), len(mid), len(right))
+    left += [''] * (height - len(left))
+    mid += [''] * (height - len(mid))
+    right += [''] * (height - len(right))
+    for l, m, r in zip(left, mid, right):
+        print('| {:<28s} | {:<10s} | {:<28s} |'.format(l, m, r))
+
+
+def main(text_file):
+    # Extracts subject-verb-object triples from the given text file,
+    # and prints each one.
+
+    # Read the input file.
+    with open(text_file, 'rb') as f:
+        text = f.read().decode('utf8')
+
+    analysis = analyze_syntax(text)
+    tokens = analysis.get('tokens', [])
+
+    for triple in find_triples(tokens):
+        show_triple(tokens, text, triple)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawDescriptionHelpFormatter)
+    parser.add_argument(
+        'text_file',
+        help='A file containing the document to process. '
+        'Should be encoded in UTF8 or ASCII')
+    args = parser.parse_args()
+    main(args.text_file)
diff --git a/language/snippets/syntax_triples/main_test.py b/language/snippets/syntax_triples/main_test.py
new file mode 100755
index 000000000000..62c2915da02e
--- /dev/null
+++ b/language/snippets/syntax_triples/main_test.py
@@ -0,0 +1,50 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
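+
+"""Tests for the subject-verb-object triples example."""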
+ +import re + +import main + + +def test_dependents(): + text = "I am eating a delicious banana" + analysis = main.analyze_syntax(text) + tokens = analysis.get('tokens', []) + assert [0, 1, 5] == main.dependents(tokens, 2) + assert [3, 4] == main.dependents(tokens, 5) + + +def test_phrase_text_for_head(): + text = "A small collection of words" + analysis = main.analyze_syntax(text) + tokens = analysis.get('tokens', []) + assert "words" == main.phrase_text_for_head(tokens, text, 4) + + +def test_find_triples(): + text = "President Obama won the noble prize" + analysis = main.analyze_syntax(text) + tokens = analysis.get('tokens', []) + triples = main.find_triples(tokens) + for triple in triples: + assert (1, 2, 5) == triple + + +def test_obama_example(resource, capsys): + main.main(resource('obama_wikipedia.txt')) + stdout, _ = capsys.readouterr() + lines = stdout.split('\n') + assert re.match( + r'.*Obama\b.*\| received\b.*\| national attention\b', + lines[1]) diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt new file mode 100644 index 000000000000..0b96c82ee4c2 --- /dev/null +++ b/language/snippets/syntax_triples/requirements.txt @@ -0,0 +1 @@ +google-api-python-client==1.5.1 diff --git a/language/snippets/syntax_triples/resources/obama_wikipedia.txt b/language/snippets/syntax_triples/resources/obama_wikipedia.txt new file mode 100644 index 000000000000..1e89d4ab0818 --- /dev/null +++ b/language/snippets/syntax_triples/resources/obama_wikipedia.txt @@ -0,0 +1 @@ +In 2004, Obama received national attention during his campaign to represent Illinois in the United States Senate with his victory in the March Democratic Party primary, his keynote address at the Democratic National Convention in July, and his election to the Senate in November. He began his presidential campaign in 2007 and, after a close primary campaign against Hillary Clinton in 2008, he won sufficient delegates in the Democratic Party primaries to receive the presidential nomination. He then defeated Republican nominee John McCain in the general election, and was inaugurated as president on January 20, 2009. Nine months after his inauguration, Obama was named the 2009 Nobel Peace Prize laureate. From 371cddbe33f95dfa0ae5763351c6e645d8af6384 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 16 Aug 2016 13:32:42 -0700 Subject: [PATCH 011/323] Auto-update dependencies. [(#456)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/456) --- language/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index c385fb4e4e03..d77ac3f28686 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.5.1 -requests==2.10.0 +requests==2.11.0 From 6c99bed4167155bc69216eb6666597666738e761 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 17 Aug 2016 09:34:47 -0700 Subject: [PATCH 012/323] Auto-update dependencies. 
[(#459)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/459) --- language/snippets/api/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- language/snippets/ocr_nl/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 0b96c82ee4c2..e5b1db3fd620 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.1 +google-api-python-client==1.5.2 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index d77ac3f28686..5e4dc72a3933 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.5.1 +google-api-python-client==1.5.2 requests==2.11.0 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index 0b96c82ee4c2..e5b1db3fd620 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.1 +google-api-python-client==1.5.2 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index 0b96c82ee4c2..e5b1db3fd620 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.1 +google-api-python-client==1.5.2 From 1574b6ba9f956b5a86e843fc2ca0de32961e82f2 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 18 Aug 2016 10:18:42 -0700 Subject: [PATCH 013/323] Auto-update dependencies. [(#464)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/464) --- language/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 5e4dc72a3933..6a0104a23b30 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.5.2 -requests==2.11.0 +requests==2.11.1 From fd3d18d9830259118210bb2aa0d51a8dc8b2f474 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Fri, 19 Aug 2016 13:56:28 -0700 Subject: [PATCH 014/323] Fix import order lint errors Change-Id: Ieaf7237fc6f925daec46a07d2e81a452b841198a --- language/snippets/movie_nl/main_test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/language/snippets/movie_nl/main_test.py b/language/snippets/movie_nl/main_test.py index fc69e9bccfea..8e22a1da34e7 100644 --- a/language/snippets/movie_nl/main_test.py +++ b/language/snippets/movie_nl/main_test.py @@ -14,9 +14,10 @@ import json -import main import six +import main + def test_get_request_body(): text = 'hello world' From f6182d3bdffb3a52e1b787a279d19892cfe45233 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 30 Aug 2016 10:08:32 -0700 Subject: [PATCH 015/323] Auto-update dependencies. 
[(#486)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/486) --- language/snippets/api/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- language/snippets/ocr_nl/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index e5b1db3fd620..0b52bd228750 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.2 +google-api-python-client==1.5.3 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 6a0104a23b30..3aa1d6d91e0b 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.5.2 +google-api-python-client==1.5.3 requests==2.11.1 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index e5b1db3fd620..0b52bd228750 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.2 +google-api-python-client==1.5.3 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index e5b1db3fd620..0b52bd228750 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.2 +google-api-python-client==1.5.3 From cf865a90252bd7e2771256fffa5ae9eb47b8c7d5 Mon Sep 17 00:00:00 2001 From: Jerjou Date: Mon, 19 Sep 2016 12:41:30 -0700 Subject: [PATCH 016/323] Add sentiment analysis sample [(#533)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/533) * Add sentiment analysis sample * Move sample review files into resources directory * Remove blank line from end of file * Update set up instructions to point to getting started guide * Update README to remove need to set up gcloud. Itemize what setting up a project entails. 
* Update NL README to link to Sentiment tutorial code * Coerce number types before comparison --- language/snippets/README.md | 4 ++ language/snippets/sentiment/README.md | 48 +++++++++++++++++ language/snippets/sentiment/requirements.txt | 2 + .../snippets/sentiment/resources/mixed.txt | 20 +++++++ language/snippets/sentiment/resources/neg.txt | 4 ++ .../snippets/sentiment/resources/neutral.txt | 3 ++ language/snippets/sentiment/resources/pos.txt | 11 ++++ .../snippets/sentiment/sentiment_analysis.py | 54 +++++++++++++++++++ .../sentiment/sentiment_analysis_test.py | 46 ++++++++++++++++ 9 files changed, 192 insertions(+) create mode 100644 language/snippets/sentiment/README.md create mode 100644 language/snippets/sentiment/requirements.txt create mode 100644 language/snippets/sentiment/resources/mixed.txt create mode 100644 language/snippets/sentiment/resources/neg.txt create mode 100644 language/snippets/sentiment/resources/neutral.txt create mode 100644 language/snippets/sentiment/resources/pos.txt create mode 100644 language/snippets/sentiment/sentiment_analysis.py create mode 100644 language/snippets/sentiment/sentiment_analysis_test.py diff --git a/language/snippets/README.md b/language/snippets/README.md index e63d45eb9a6a..1e4a6401bbd8 100644 --- a/language/snippets/README.md +++ b/language/snippets/README.md @@ -13,5 +13,9 @@ to extract text from images, then uses the NL API to extract entity information from those texts, and stores the extracted information in a database in support of further analysis and correlation. +- [sentiment](sentiment) contains the [Sentiment Analysis + Tutorial](https://cloud.google.com/natural-language/docs/sentiment-tutorial) +code as used within the documentation. + - [syntax_triples](syntax_triples) uses syntax analysis to find subject-verb-object triples in a given piece of text. diff --git a/language/snippets/sentiment/README.md b/language/snippets/sentiment/README.md new file mode 100644 index 000000000000..e77cdf16bef3 --- /dev/null +++ b/language/snippets/sentiment/README.md @@ -0,0 +1,48 @@ +# Introduction + +This sample contains the code referenced in the +[Sentiment Analysis Tutorial](http://cloud.google.com/natural-language/docs/sentiment-tutorial) +within the Google Cloud Natural Language API Documentation. A full walkthrough of this sample +is located within the documentation. + +This sample is a simple illustration of how to construct a sentiment analysis +request and process a response using the API. + +## Prerequisites + +Set up your +[Cloud Natural Language API project](https://cloud.google.com/natural-language/docs/getting-started#set_up_a_project) +, which includes: + +* Enabling the Natural Language API +* Setting up a service account +* Ensuring you've properly set up your `GOOGLE_APPLICATION_CREDENTIALS` for proper + authentication to the service. + +## Download the Code + +``` +$ git clone https://github.com/GoogleCloudPlatform/python-dev-samples/language/sentiment/ +$ cd python-docs-samples/language/sentiment +``` + +## Run the Code + +Open a sample folder, create a virtualenv, install dependencies, and run the sample: + +``` +$ virtualenv env +$ source env/bin/activate +(env)$ pip install -r requirements.txt +``` + +### Usage + +This sample provides four sample movie reviews which you can +provide to the sample on the command line. (You can also +pass your own text files.) 
+ +``` +(env)$ python sentiment_analysis.py textfile.txt +Sentiment: polarity of -0.1 with magnitude of 6.7 +``` diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt new file mode 100644 index 000000000000..dc1d6a1d6efe --- /dev/null +++ b/language/snippets/sentiment/requirements.txt @@ -0,0 +1,2 @@ +google-api-python-client==1.5.3 + diff --git a/language/snippets/sentiment/resources/mixed.txt b/language/snippets/sentiment/resources/mixed.txt new file mode 100644 index 000000000000..d4a42aa2928e --- /dev/null +++ b/language/snippets/sentiment/resources/mixed.txt @@ -0,0 +1,20 @@ +I really wanted to love 'Bladerunner' but ultimately I couldn't get +myself to appreciate it fully. However, you may like it if you're into +science fiction, especially if you're interested in the philosophical +exploration of what it means to be human or machine. Some of the gizmos +like the flying cars and the Vouight-Kampff machine (which seemed very +steampunk), were quite cool. + +I did find the plot pretty slow and but the dialogue and action sequences +were good. Unlike most science fiction films, this one was mostly quiet, and +not all that much happened, except during the last 15 minutes. I didn't +understand why a unicorn was in the movie. The visual effects were fantastic, +however, and the musical score and overall mood was quite interesting. +A futurist Los Angeles that was both highly polished and also falling apart +reminded me of 'Outland.' Certainly, the style of the film made up for +many of its pedantic plot holes. + +If you want your sci-fi to be lasers and spaceships, 'Bladerunner' may +disappoint you. But if you want it to make you think, this movie may +be worth the money. + diff --git a/language/snippets/sentiment/resources/neg.txt b/language/snippets/sentiment/resources/neg.txt new file mode 100644 index 000000000000..5dcbec0f8c5f --- /dev/null +++ b/language/snippets/sentiment/resources/neg.txt @@ -0,0 +1,4 @@ +What was Hollywood thinking with this movie! I hated, +hated, hated it. BORING! I went afterwards and demanded my money back. +They refused. + diff --git a/language/snippets/sentiment/resources/neutral.txt b/language/snippets/sentiment/resources/neutral.txt new file mode 100644 index 000000000000..89839ef25cf2 --- /dev/null +++ b/language/snippets/sentiment/resources/neutral.txt @@ -0,0 +1,3 @@ +I neither liked nor disliked this movie. Parts were interesting, but +overall I was left wanting more. The acting was pretty good. + diff --git a/language/snippets/sentiment/resources/pos.txt b/language/snippets/sentiment/resources/pos.txt new file mode 100644 index 000000000000..5f211496775c --- /dev/null +++ b/language/snippets/sentiment/resources/pos.txt @@ -0,0 +1,11 @@ +`Bladerunner` is often touted as one of the best science fiction films ever +made. Indeed, it satisfies many of the requisites for good sci-fi: a future +world with flying cars and humanoid robots attempting to rebel against their +creators. But more than anything, `Bladerunner` is a fantastic exploration +of the nature of what it means to be human. If we create robots which can +think, will they become human? And if they do, what makes us unique? Indeed, +how can we be sure we're not human in any case? `Bladerunner` explored +these issues before such movies as `The Matrix,' and did so intelligently. +The visual effects and score by Vangelis set the mood. See this movie +in a dark theatre to appreciate it fully. Highly recommended! 
+ diff --git a/language/snippets/sentiment/sentiment_analysis.py b/language/snippets/sentiment/sentiment_analysis.py new file mode 100644 index 000000000000..8e250881305a --- /dev/null +++ b/language/snippets/sentiment/sentiment_analysis.py @@ -0,0 +1,54 @@ +# Copyright 2016, Google, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +'''Demonstrates how to make a simple call to the Natural Language API''' + +import argparse +from googleapiclient import discovery +from oauth2client.client import GoogleCredentials + + +def main(movie_review_filename): + '''Run a sentiment analysis request on text within a passed filename.''' + + credentials = GoogleCredentials.get_application_default() + service = discovery.build('language', 'v1beta1', credentials=credentials) + + with open(movie_review_filename, 'r') as review_file: + service_request = service.documents().analyzeSentiment( + body={ + 'document': { + 'type': 'PLAIN_TEXT', + 'content': review_file.read(), + } + } + ) + response = service_request.execute() + + polarity = response['documentSentiment']['polarity'] + magnitude = response['documentSentiment']['magnitude'] + + print('Sentiment: polarity of {} with magnitude of {}'.format( + polarity, magnitude)) + return 0 + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument( + 'movie_review_filename', + help='The filename of the movie review you\'d like to analyze.') + args = parser.parse_args() + main(args.movie_review_filename) diff --git a/language/snippets/sentiment/sentiment_analysis_test.py b/language/snippets/sentiment/sentiment_analysis_test.py new file mode 100644 index 000000000000..d6b6a7abfea7 --- /dev/null +++ b/language/snippets/sentiment/sentiment_analysis_test.py @@ -0,0 +1,46 @@ +# Copyright 2016, Google, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from sentiment_analysis import main + + +def test_pos(resource, capsys): + main(resource('pos.txt')) + out, err = capsys.readouterr() + polarity = float(re.search('polarity of (.+?) with', out).group(1)) + magnitude = float(re.search('magnitude of (.+?)', out).group(1)) + assert polarity * magnitude > 0 + + +def test_neg(resource, capsys): + main(resource('neg.txt')) + out, err = capsys.readouterr() + polarity = float(re.search('polarity of (.+?) 
with', out).group(1))
+    magnitude = float(re.search('magnitude of (.+?)', out).group(1))
+    assert polarity * magnitude < 0
+
+
+def test_mixed(resource, capsys):
+    main(resource('mixed.txt'))
+    out, err = capsys.readouterr()
+    polarity = float(re.search('polarity of (.+?) with', out).group(1))
+    assert polarity <= 0.3
+    assert polarity >= -0.3
+
+
+def test_neutral(resource, capsys):
+    main(resource('neutral.txt'))
+    out, err = capsys.readouterr()
+    magnitude = float(re.search('magnitude of (.+?)', out).group(1))
+    assert magnitude <= 2.0

From d9561d486fab7e6e16ce957965705a7007ee7cce Mon Sep 17 00:00:00 2001
From: DPE bot
Date: Tue, 20 Sep 2016 12:26:02 -0700
Subject: [PATCH 017/323] Auto-update dependencies.
 [(#537)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/537)

---
 language/snippets/sentiment/requirements.txt | 1 -
 1 file changed, 1 deletion(-)

diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt
index dc1d6a1d6efe..0b52bd228750 100644
--- a/language/snippets/sentiment/requirements.txt
+++ b/language/snippets/sentiment/requirements.txt
@@ -1,2 +1 @@
 google-api-python-client==1.5.3
-

From 2ac4f1730a022fe4322cfb20e05ae3f0a3163ef8 Mon Sep 17 00:00:00 2001
From: Jon Wayne Parrott
Date: Tue, 27 Sep 2016 09:46:43 -0700
Subject: [PATCH 018/323] Fix language test

Change-Id: I285d4258c39ec7f0fd92e890a83e6dbc58941525
---
 language/snippets/ocr_nl/main_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/language/snippets/ocr_nl/main_test.py b/language/snippets/ocr_nl/main_test.py
index c07ed747ea0f..d3d6d6a5e21e 100755
--- a/language/snippets/ocr_nl/main_test.py
+++ b/language/snippets/ocr_nl/main_test.py
@@ -73,7 +73,7 @@ def test_entities_list(cloud_config):
     locale, document = main.extract_description(texts[image_path])
     text_analyzer = main.TextAnalyzer()
     entities = text_analyzer.nl_detect(document)
-    assert len(entities) == 4
+    assert entities
     etype, ename, salience, wurl = text_analyzer.extract_entity_info(
         entities[0])
     assert ename == 'bennet'

From 3f0bccd5d2c782947352338d1af27b27ffde8be6 Mon Sep 17 00:00:00 2001
From: Jason Dobry
Date: Wed, 5 Oct 2016 09:56:04 -0700
Subject: [PATCH 019/323] Add new "quickstart" samples
 [(#547)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/547)

---
 language/snippets/cloud-client/quickstart.py | 39 +++++++++++++++++++
 .../snippets/cloud-client/requirements.txt | 1 +
 2 files changed, 40 insertions(+)
 create mode 100644 language/snippets/cloud-client/quickstart.py
 create mode 100644 language/snippets/cloud-client/requirements.txt

diff --git a/language/snippets/cloud-client/quickstart.py b/language/snippets/cloud-client/quickstart.py
new file mode 100644
index 000000000000..24f2ff4dea1f
--- /dev/null
+++ b/language/snippets/cloud-client/quickstart.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
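
One subtlety in the regex parsing used by the tests a few hunks above: `'magnitude of (.+?)'` ends with a non-greedy group, so it captures exactly one character, and `float()` sees only the first digit of the magnitude (`6` from `6.7`, for instance). The sign-based assertions still pass, but threshold checks such as `magnitude <= 2.0` silently compare against a truncated value. A stricter parse might look like this (a hypothetical helper, not part of the patch series):

```python
import re


def parse_sentiment(output):
    """Recover polarity and magnitude from the sample's printed line,
    e.g. 'Sentiment: polarity of -0.1 with magnitude of 6.7'."""
    # Matching explicit number tokens (instead of a trailing lazy group)
    # captures the full values rather than a single character.
    match = re.search(
        r'polarity of (-?[\d.]+) with magnitude of (-?[\d.]+)', output)
    return float(match.group(1)), float(match.group(2))
```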
+ + +def run_quickstart(): + # [START language_quickstart] + # Imports the Google Cloud client library + from google.cloud import language + + # Instantiates a client + language_client = language.Client() + + # The text to analyze + text = 'Hello, world!' + document = language_client.document_from_text(text) + + # Detects the sentiment of the text + sentiment = document.analyze_sentiment() + + print('Text: {}'.format(text)) + print('Sentiment: {}, {}'.format(sentiment.polarity, sentiment.magnitude)) + # [END language_quickstart] + + +if __name__ == '__main__': + run_quickstart() diff --git a/language/snippets/cloud-client/requirements.txt b/language/snippets/cloud-client/requirements.txt new file mode 100644 index 000000000000..cc966c0ec05f --- /dev/null +++ b/language/snippets/cloud-client/requirements.txt @@ -0,0 +1 @@ +google-cloud-language==0.20.0 From e0f6fa7c18c6a4a44fc5a7af0ac34977d17bee97 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 12 Oct 2016 10:48:57 -0700 Subject: [PATCH 020/323] Quickstart tests [(#569)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/569) * Add tests for quickstarts * Update secrets --- .../snippets/cloud-client/quickstart_test.py | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 language/snippets/cloud-client/quickstart_test.py diff --git a/language/snippets/cloud-client/quickstart_test.py b/language/snippets/cloud-client/quickstart_test.py new file mode 100644 index 000000000000..bd9954c83bb7 --- /dev/null +++ b/language/snippets/cloud-client/quickstart_test.py @@ -0,0 +1,22 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import quickstart + + +def test_quickstart(capsys): + quickstart.run_quickstart() + out, _ = capsys.readouterr() + assert 'Sentiment' in out From 2518410d8e8741cb416280ee801320723705dc2f Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 18 Oct 2016 13:41:00 -0700 Subject: [PATCH 021/323] Auto-update dependencies. 
[(#584)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/584) --- language/snippets/api/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- language/snippets/ocr_nl/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 0b52bd228750..c6e5aa1484ea 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.3 +google-api-python-client==1.5.4 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 3aa1d6d91e0b..adafc4391a2f 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.5.3 +google-api-python-client==1.5.4 requests==2.11.1 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index 0b52bd228750..c6e5aa1484ea 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.3 +google-api-python-client==1.5.4 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 0b52bd228750..c6e5aa1484ea 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.3 +google-api-python-client==1.5.4 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index 0b52bd228750..c6e5aa1484ea 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.3 +google-api-python-client==1.5.4 From 8980c027450b1d269a8f631103ca3ff22c53ba5c Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 25 Oct 2016 10:54:45 -0700 Subject: [PATCH 022/323] Generate most non-appengine readmes Change-Id: I3779282126cdd05b047194d356932b9995484115 --- language/snippets/api/README.md | 87 -------------------- language/snippets/api/README.rst.in | 20 +++++ language/snippets/cloud-client/README.rst.in | 21 +++++ 3 files changed, 41 insertions(+), 87 deletions(-) delete mode 100644 language/snippets/api/README.md create mode 100644 language/snippets/api/README.rst.in create mode 100644 language/snippets/cloud-client/README.rst.in diff --git a/language/snippets/api/README.md b/language/snippets/api/README.md deleted file mode 100644 index 9625df30c89f..000000000000 --- a/language/snippets/api/README.md +++ /dev/null @@ -1,87 +0,0 @@ - -# Google Cloud Natural Language API Sample - -This Python sample demonstrates the use of the [Google Cloud Natural Language API][NL-Docs] -for sentiment, entity, and syntax analysis. - -[NL-Docs]: https://cloud.google.com/natural-language/docs/ - -## Setup - -Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) -steps in the Quickstart doc to create a project and enable the -Cloud Natural Language API. 
Following those steps, make sure that you -[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), -and export the following environment variable: - -``` -export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json -``` - -## Run the sample - -Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. - -To run the example, install the necessary libraries using pip: - -```sh -$ pip install -r requirements.txt -``` - -Then, run the script: - -```sh -$ python analyze.py -``` - -where `` is one of: `entities`, `sentiment`, or `syntax`. - -The script will write to STDOUT the json returned from the API for the requested feature. - -For example, if you run: - -```sh -$ python analyze.py entities "Tom Sawyer is a book written by a guy known as Mark Twain." -``` - -You will see something like the following returned: - -``` -{ - "entities": [ - { - "salience": 0.49785897, - "mentions": [ - { - "text": { - "content": "Tom Sawyer", - "beginOffset": 0 - } - } - ], - "type": "PERSON", - "name": "Tom Sawyer", - "metadata": { - "wikipedia_url": "http://en.wikipedia.org/wiki/The_Adventures_of_Tom_Sawyer" - } - }, - { - "salience": 0.12209519, - "mentions": [ - { - "text": { - "content": "Mark Twain", - "beginOffset": 47 - } - } - ], - "type": "PERSON", - "name": "Mark Twain", - "metadata": { - "wikipedia_url": "http://en.wikipedia.org/wiki/Mark_Twain" - } - } - ], - "language": "en" -} -``` diff --git a/language/snippets/api/README.rst.in b/language/snippets/api/README.rst.in new file mode 100644 index 000000000000..31294fae1960 --- /dev/null +++ b/language/snippets/api/README.rst.in @@ -0,0 +1,20 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Natural Language API + short_name: Cloud Natural Language API + url: https://cloud.google.com/natural-language/docs/ + description: > + The `Google Cloud Natural Language API`_ provides natural language + understanding technologies to developers, including sentiment analysis, + entity recognition, and syntax analysis. This API is part of the larger + Cloud Machine Learning API. + +setup: +- auth +- install_deps + +samples: +- name: Analyze syntax + file: analyze.py + show_help: true diff --git a/language/snippets/cloud-client/README.rst.in b/language/snippets/cloud-client/README.rst.in new file mode 100644 index 000000000000..78da29111a06 --- /dev/null +++ b/language/snippets/cloud-client/README.rst.in @@ -0,0 +1,21 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Natural Language API + short_name: Cloud Natural Language API + url: https://cloud.google.com/natural-language/docs/ + description: > + The `Google Cloud Natural Language API`_ provides natural language + understanding technologies to developers, including sentiment analysis, + entity recognition, and syntax analysis. This API is part of the larger + Cloud Machine Learning API. + +setup: +- auth +- install_deps + +samples: +- name: Quickstart + file: quickstart.py + +cloud_client_library: true From 6820628d117561abcd5af7e5814ff251c0eafec6 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 1 Nov 2016 23:10:14 -0700 Subject: [PATCH 023/323] Auto-update dependencies. 
[(#629)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/629) --- language/snippets/api/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- language/snippets/ocr_nl/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index c6e5aa1484ea..2cd2a1334ea0 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.4 +google-api-python-client==1.5.5 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index adafc4391a2f..7a0de85468fa 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.5.4 +google-api-python-client==1.5.5 requests==2.11.1 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index c6e5aa1484ea..2cd2a1334ea0 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.4 +google-api-python-client==1.5.5 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index c6e5aa1484ea..2cd2a1334ea0 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.4 +google-api-python-client==1.5.5 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index c6e5aa1484ea..2cd2a1334ea0 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.4 +google-api-python-client==1.5.5 From cce185a16a341148e09dc0982ee74d7b6a880b24 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Wed, 19 Oct 2016 12:37:30 -0700 Subject: [PATCH 024/323] added language v1 endpoint --- language/snippets/api/analyze.py | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/language/snippets/api/analyze.py b/language/snippets/api/analyze.py index 73e892c354a1..c46efff2d3eb 100644 --- a/language/snippets/api/analyze.py +++ b/language/snippets/api/analyze.py @@ -24,15 +24,33 @@ import httplib2 from oauth2client.client import GoogleCredentials - +# TODO REMOVE - when discovery is public +GOOGLE_API_KEY = "GOOGLE_API_KEY" + +# TODO REMOVE - when discovery is public +DISCOVERY_URL = ('https://language.googleapis.com/$discovery/rest?' 
+ 'version=v1&labels=GOOGLE_INTERNAL&key={}') + +# TODO UNCOMMENT - when discovery is public +# def get_service(): +# credentials = GoogleCredentials.get_application_default() +# scoped_credentials = credentials.create_scoped( +# ['https://www.googleapis.com/auth/cloud-platform']) +# http = httplib2.Http() +# scoped_credentials.authorize(http) +# return discovery.build('language', 'v1', http=http) +# TODO END + +# TODO REMOVE - when discovery is public def get_service(): + """Get language service using discovery.""" + import os + api_key = os.environ[GOOGLE_API_KEY] credentials = GoogleCredentials.get_application_default() - scoped_credentials = credentials.create_scoped( - ['https://www.googleapis.com/auth/cloud-platform']) - http = httplib2.Http() - scoped_credentials.authorize(http) - return discovery.build('language', 'v1beta1', http=http) - + service = discovery.build('language', 'v1', http=httplib2.Http(), credentials=credentials, + discoveryServiceUrl=DISCOVERY_URL.format(api_key)) + return service +# TODO END def get_native_encoding_type(): """Returns the encoding type that matches Python's native strings.""" From ab8aceea0029de943d6ef94d5dc41026b7859ecc Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Wed, 19 Oct 2016 12:39:46 -0700 Subject: [PATCH 025/323] added analyze syntax --- language/snippets/api/analyze.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/language/snippets/api/analyze.py b/language/snippets/api/analyze.py index c46efff2d3eb..b0e0e0c4c23c 100644 --- a/language/snippets/api/analyze.py +++ b/language/snippets/api/analyze.py @@ -99,15 +99,11 @@ def analyze_syntax(text, encoding='UTF32'): 'type': 'PLAIN_TEXT', 'content': text, }, - 'features': { - 'extract_syntax': True, - }, - 'encodingType': encoding, } service = get_service() - request = service.documents().annotateText(body=body) + request = service.documents().analyzeSyntax(body=body) response = request.execute() return response From 357fcd78aba30b80758a51633e24e812745b12ac Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Wed, 19 Oct 2016 14:04:42 -0700 Subject: [PATCH 026/323] fixed the get_service() method and added discoveryServiceUrl --- language/snippets/api/analyze.py | 27 ++++++++------------------- 1 file changed, 8 insertions(+), 19 deletions(-) diff --git a/language/snippets/api/analyze.py b/language/snippets/api/analyze.py index b0e0e0c4c23c..2d497baaa8a6 100644 --- a/language/snippets/api/analyze.py +++ b/language/snippets/api/analyze.py @@ -29,28 +29,17 @@ # TODO REMOVE - when discovery is public DISCOVERY_URL = ('https://language.googleapis.com/$discovery/rest?' 
- 'version=v1&labels=GOOGLE_INTERNAL&key={}') - -# TODO UNCOMMENT - when discovery is public -# def get_service(): -# credentials = GoogleCredentials.get_application_default() -# scoped_credentials = credentials.create_scoped( -# ['https://www.googleapis.com/auth/cloud-platform']) -# http = httplib2.Http() -# scoped_credentials.authorize(http) -# return discovery.build('language', 'v1', http=http) -# TODO END + 'version=v1&labels=GOOGLE_INTERNAL') -# TODO REMOVE - when discovery is public def get_service(): - """Get language service using discovery.""" - import os - api_key = os.environ[GOOGLE_API_KEY] credentials = GoogleCredentials.get_application_default() - service = discovery.build('language', 'v1', http=httplib2.Http(), credentials=credentials, - discoveryServiceUrl=DISCOVERY_URL.format(api_key)) - return service -# TODO END + scoped_credentials = credentials.create_scoped( + ['https://www.googleapis.com/auth/cloud-platform']) + http = httplib2.Http() + scoped_credentials.authorize(http) + return discovery.build('language', 'v1', http=http, + discoveryServiceUrl=DISCOVERY_URL) + def get_native_encoding_type(): """Returns the encoding type that matches Python's native strings.""" From 99056dd769ecc201611d816eb80e831b3307e5a6 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Wed, 19 Oct 2016 14:06:26 -0700 Subject: [PATCH 027/323] removed the env variable --- language/snippets/api/analyze.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/language/snippets/api/analyze.py b/language/snippets/api/analyze.py index 2d497baaa8a6..aca928ce1764 100644 --- a/language/snippets/api/analyze.py +++ b/language/snippets/api/analyze.py @@ -24,8 +24,6 @@ import httplib2 from oauth2client.client import GoogleCredentials -# TODO REMOVE - when discovery is public -GOOGLE_API_KEY = "GOOGLE_API_KEY" # TODO REMOVE - when discovery is public DISCOVERY_URL = ('https://language.googleapis.com/$discovery/rest?' @@ -37,7 +35,8 @@ def get_service(): ['https://www.googleapis.com/auth/cloud-platform']) http = httplib2.Http() scoped_credentials.authorize(http) - return discovery.build('language', 'v1', http=http, + return discovery.build('language', 'v1', + http=http, discoveryServiceUrl=DISCOVERY_URL) From 4fc315ff96a00768cdebc4cbad418cdd467e84ca Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Wed, 19 Oct 2016 15:22:12 -0700 Subject: [PATCH 028/323] added README.md sample output --- language/snippets/api/README.md | 173 ++++++++++++++++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 language/snippets/api/README.md diff --git a/language/snippets/api/README.md b/language/snippets/api/README.md new file mode 100644 index 000000000000..49a24cbc10fb --- /dev/null +++ b/language/snippets/api/README.md @@ -0,0 +1,173 @@ + +# Google Cloud Natural Language API Sample + +This Python sample demonstrates the use of the [Google Cloud Natural Language API][NL-Docs] +for sentiment, entity, and syntax analysis. + +[NL-Docs]: https://cloud.google.com/natural-language/docs/ + +## Setup + +Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) +steps in the Quickstart doc to create a project and enable the +Cloud Natural Language API. 
Following those steps, make sure that you +[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), +and export the following environment variable: + +``` +export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json +``` + +## Run the sample + +Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. + +To run the example, install the necessary libraries using pip: + +```sh +$ pip install -r requirements.txt +``` + +Then, run the script: + +```sh +$ python analyze.py +``` + +where `` is one of: `entities`, `sentiment`, or `syntax`. + +The script will write to STDOUT the json returned from the API for the requested feature. + +* Example1: + +```sh +$ python analyze.py entities "Tom Sawyer is a book written by a guy known as Mark Twain." +``` + +You will see something like the following returned: + +``` +{ + "entities": [ + { + "salience": 0.50827783, + "mentions": [ + { + "text": { + "content": "Tom Sawyer", + "beginOffset": 0 + }, + "type": "PROPER" + } + ], + "type": "PERSON", + "name": "Tom Sawyer", + "metadata": { + "mid": "/m/01b6vv", + "wikipedia_url": "http://en.wikipedia.org/wiki/The_Adventures_of_Tom_Sawyer" + } + }, + { + "salience": 0.22226454, + "mentions": [ + { + "text": { + "content": "book", + "beginOffset": 16 + }, + "type": "COMMON" + } + ], + "type": "WORK_OF_ART", + "name": "book", + "metadata": {} + }, + { + "salience": 0.18305534, + "mentions": [ + { + "text": { + "content": "guy", + "beginOffset": 34 + }, + "type": "COMMON" + } + ], + "type": "PERSON", + "name": "guy", + "metadata": {} + }, + { + "salience": 0.086402282, + "mentions": [ + { + "text": { + "content": "Mark Twain", + "beginOffset": 47 + }, + "type": "PROPER" + } + ], + "type": "PERSON", + "name": "Mark Twain", + "metadata": { + "mid": "/m/014635", + "wikipedia_url": "http://en.wikipedia.org/wiki/Mark_Twain" + } + } + ], + "language": "en" +} +``` + +* Example2: + +```sh +$ python analyze.py entities "Apple has launched new iPhone." +``` + +You will see something like the following returned: + +``` +{ + "entities": [ + { + "salience": 0.72550339, + "mentions": [ + { + "text": { + "content": "Apple", + "beginOffset": 0 + }, + "type": "PROPER" + } + ], + "type": "ORGANIZATION", + "name": "Apple", + "metadata": { + "mid": "/m/0k8z", + "wikipedia_url": "http://en.wikipedia.org/wiki/Apple_Inc." + } + }, + { + "salience": 0.27449661, + "mentions": [ + { + "text": { + "content": "iPhone", + "beginOffset": 23 + }, + "type": "PROPER" + } + ], + "type": "CONSUMER_GOOD", + "name": "iPhone", + "metadata": { + "mid": "/m/027lnzs", + "wikipedia_url": "http://en.wikipedia.org/wiki/IPhone" + } + } + ], + "language": "en" +} +``` From 8285a3fc75aae5c8b1715cf22ed186e9cd4682f2 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Wed, 19 Oct 2016 15:23:54 -0700 Subject: [PATCH 029/323] Added header --- language/snippets/api/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/language/snippets/api/README.md b/language/snippets/api/README.md index 49a24cbc10fb..33710e22712a 100644 --- a/language/snippets/api/README.md +++ b/language/snippets/api/README.md @@ -38,6 +38,8 @@ where `` is one of: `entities`, `sentiment`, or `syntax`. The script will write to STDOUT the json returned from the API for the requested feature. 
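
Because the script prints the raw JSON response, it composes easily with other tooling. A hedged sketch of a downstream consumer that ranks the returned entities by salience (hypothetical, assuming `analyze.py` from this series is in the working directory):

```python
# rank_entities.py -- hypothetical consumer of analyze.py's stdout.
import json
import subprocess

raw = subprocess.check_output(
    ['python', 'analyze.py', 'entities',
     'Tom Sawyer is a book written by a guy known as Mark Twain.'])

response = json.loads(raw.decode('utf-8'))
# Rank the returned entities by salience, highest first.
for entity in sorted(response.get('entities', []),
                     key=lambda e: e['salience'], reverse=True):
    print('{name} ({type}): salience {salience:.3f}'.format(**entity))
```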
+## Example Runs + * Example1: ```sh From aa21d52542caa4c0f02304c906b4879db26ecc77 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Thu, 20 Oct 2016 17:43:33 -0700 Subject: [PATCH 030/323] added two blank lines --- language/snippets/api/analyze.py | 1 + 1 file changed, 1 insertion(+) diff --git a/language/snippets/api/analyze.py b/language/snippets/api/analyze.py index aca928ce1764..6861ff5c39d0 100644 --- a/language/snippets/api/analyze.py +++ b/language/snippets/api/analyze.py @@ -29,6 +29,7 @@ DISCOVERY_URL = ('https://language.googleapis.com/$discovery/rest?' 'version=v1&labels=GOOGLE_INTERNAL') + def get_service(): credentials = GoogleCredentials.get_application_default() scoped_credentials = credentials.create_scoped( From df80fa8d0735d387b221b98924a1001a2e2bee85 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Mon, 24 Oct 2016 14:40:57 -0700 Subject: [PATCH 031/323] changed to score as per new api --- language/snippets/api/analyze.py | 1 + 1 file changed, 1 insertion(+) diff --git a/language/snippets/api/analyze.py b/language/snippets/api/analyze.py index 6861ff5c39d0..7e961d83ec84 100644 --- a/language/snippets/api/analyze.py +++ b/language/snippets/api/analyze.py @@ -88,6 +88,7 @@ def analyze_syntax(text, encoding='UTF32'): 'type': 'PLAIN_TEXT', 'content': text, }, + 'encoding_type': encoding } service = get_service() From e777e7699218896d8f27922b3fee5701ac9a82ab Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Mon, 24 Oct 2016 14:41:15 -0700 Subject: [PATCH 032/323] added encoding type --- language/snippets/api/analyze_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/language/snippets/api/analyze_test.py b/language/snippets/api/analyze_test.py index 11b0d65d6299..8f024fda6158 100644 --- a/language/snippets/api/analyze_test.py +++ b/language/snippets/api/analyze_test.py @@ -33,7 +33,7 @@ def test_analyze_sentiment(capsys): 'your face is really ugly and i hate it.') sentiment = result['documentSentiment'] - assert sentiment['polarity'] < 0 + assert sentiment['score'] < 0 assert sentiment['magnitude'] < 1 result = analyze.analyze_sentiment( @@ -41,7 +41,7 @@ def test_analyze_sentiment(capsys): 'angle of repose leaves little room for improvement.') sentiment = result['documentSentiment'] - assert sentiment['polarity'] > 0 + assert sentiment['score'] > 0 assert sentiment['magnitude'] < 1 From 284d4be1673406c57f50a9a3befe08fdd9de57f3 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Tue, 25 Oct 2016 14:07:36 -0700 Subject: [PATCH 033/323] added encoding type analyze sentiment --- language/snippets/api/analyze.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/language/snippets/api/analyze.py b/language/snippets/api/analyze.py index 7e961d83ec84..ee40e4d89111 100644 --- a/language/snippets/api/analyze.py +++ b/language/snippets/api/analyze.py @@ -55,7 +55,7 @@ def analyze_entities(text, encoding='UTF32'): 'type': 'PLAIN_TEXT', 'content': text, }, - 'encodingType': encoding, + 'encoding_type': encoding, } service = get_service() @@ -66,12 +66,13 @@ def analyze_entities(text, encoding='UTF32'): return response -def analyze_sentiment(text): +def analyze_sentiment(text, encoding='UTF32'): body = { 'document': { 'type': 'PLAIN_TEXT', 'content': text, - } + }, + 'encoding_type': encoding } service = get_service() @@ -112,7 +113,7 @@ def analyze_syntax(text, encoding='UTF32'): if args.command == 'entities': result = analyze_entities(args.text, get_native_encoding_type()) elif args.command == 'sentiment': - result = 
analyze_sentiment(args.text) + result = analyze_sentiment(args.text, get_native_encoding_type()) elif args.command == 'syntax': result = analyze_syntax(args.text, get_native_encoding_type()) From 9796981c571fe53fbb5d17da2aed3e0ef14ad11a Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Thu, 10 Nov 2016 13:39:07 -0800 Subject: [PATCH 034/323] added a TODO --- language/snippets/api/analyze.py | 1 + 1 file changed, 1 insertion(+) diff --git a/language/snippets/api/analyze.py b/language/snippets/api/analyze.py index ee40e4d89111..1e18a48764f1 100644 --- a/language/snippets/api/analyze.py +++ b/language/snippets/api/analyze.py @@ -36,6 +36,7 @@ def get_service(): ['https://www.googleapis.com/auth/cloud-platform']) http = httplib2.Http() scoped_credentials.authorize(http) + # TODO Change to credentials=credentials return discovery.build('language', 'v1', http=http, discoveryServiceUrl=DISCOVERY_URL) From 2f3eba10cce279658a211be9a5bffaddc32d8d79 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Sat, 12 Nov 2016 08:09:34 -0800 Subject: [PATCH 035/323] added auto-gen rst file --- language/snippets/api/README.rst | 98 +++++++++++++++++++++ language/snippets/cloud-client/README.rst | 102 ++++++++++++++++++++++ 2 files changed, 200 insertions(+) create mode 100644 language/snippets/api/README.rst create mode 100644 language/snippets/cloud-client/README.rst diff --git a/language/snippets/api/README.rst b/language/snippets/api/README.rst new file mode 100644 index 000000000000..369e2f4e9db3 --- /dev/null +++ b/language/snippets/api/README.rst @@ -0,0 +1,98 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Natural Language API Python Samples +=============================================================================== + +This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. + + + + +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +Authentication is typically done through `Application Default Credentials`_, +which means you do not have to change the code to authenticate as long as +your environment has credentials. You have a few options for setting up +authentication: + +#. When running locally, use the `Google Cloud SDK`_ + + .. code-block:: bash + + gcloud beta auth application-default login + + +#. When running on App Engine or Compute Engine, credentials are already + set-up. However, you may need to configure your Compute Engine instance + with `additional scopes`_. + +#. You can create a `Service Account key file`_. This file can be used to + authenticate to Google Cloud Platform services from any environment. To use + the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to + the path to the key file, for example: + + .. code-block:: bash + + export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json + +.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow +.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using +.. 
_Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount + +Install Dependencies +++++++++++++++++++++ + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Analyze syntax ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python analyze.py + + usage: analyze.py [-h] {entities,sentiment,syntax} text + + Analyzes text using the Google Cloud Natural Language API. + + positional arguments: + {entities,sentiment,syntax} + text + + optional arguments: + -h, --help show this help message and exit + + + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ diff --git a/language/snippets/cloud-client/README.rst b/language/snippets/cloud-client/README.rst new file mode 100644 index 000000000000..d8ba578d0098 --- /dev/null +++ b/language/snippets/cloud-client/README.rst @@ -0,0 +1,102 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Natural Language API Python Samples +=============================================================================== + +This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. + + + + +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +Authentication is typically done through `Application Default Credentials`_, +which means you do not have to change the code to authenticate as long as +your environment has credentials. You have a few options for setting up +authentication: + +#. When running locally, use the `Google Cloud SDK`_ + + .. code-block:: bash + + gcloud beta auth application-default login + + +#. When running on App Engine or Compute Engine, credentials are already + set-up. However, you may need to configure your Compute Engine instance + with `additional scopes`_. + +#. You can create a `Service Account key file`_. This file can be used to + authenticate to Google Cloud Platform services from any environment. To use + the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to + the path to the key file, for example: + + .. code-block:: bash + + export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json + +.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow +.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using +.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount + +Install Dependencies +++++++++++++++++++++ + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. + +#. Create a virtualenv. 
Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Quickstart ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python quickstart.py + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ From c759cea68ee787f9181070865257f4c212b8aeb7 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Sat, 12 Nov 2016 09:47:16 -0800 Subject: [PATCH 036/323] removed README.md --- language/snippets/api/README.md | 175 -------------------------------- 1 file changed, 175 deletions(-) delete mode 100644 language/snippets/api/README.md diff --git a/language/snippets/api/README.md b/language/snippets/api/README.md deleted file mode 100644 index 33710e22712a..000000000000 --- a/language/snippets/api/README.md +++ /dev/null @@ -1,175 +0,0 @@ - -# Google Cloud Natural Language API Sample - -This Python sample demonstrates the use of the [Google Cloud Natural Language API][NL-Docs] -for sentiment, entity, and syntax analysis. - -[NL-Docs]: https://cloud.google.com/natural-language/docs/ - -## Setup - -Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) -steps in the Quickstart doc to create a project and enable the -Cloud Natural Language API. Following those steps, make sure that you -[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), -and export the following environment variable: - -``` -export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json -``` - -## Run the sample - -Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. - -To run the example, install the necessary libraries using pip: - -```sh -$ pip install -r requirements.txt -``` - -Then, run the script: - -```sh -$ python analyze.py -``` - -where `` is one of: `entities`, `sentiment`, or `syntax`. - -The script will write to STDOUT the json returned from the API for the requested feature. - -## Example Runs - -* Example1: - -```sh -$ python analyze.py entities "Tom Sawyer is a book written by a guy known as Mark Twain." 
-``` - -You will see something like the following returned: - -``` -{ - "entities": [ - { - "salience": 0.50827783, - "mentions": [ - { - "text": { - "content": "Tom Sawyer", - "beginOffset": 0 - }, - "type": "PROPER" - } - ], - "type": "PERSON", - "name": "Tom Sawyer", - "metadata": { - "mid": "/m/01b6vv", - "wikipedia_url": "http://en.wikipedia.org/wiki/The_Adventures_of_Tom_Sawyer" - } - }, - { - "salience": 0.22226454, - "mentions": [ - { - "text": { - "content": "book", - "beginOffset": 16 - }, - "type": "COMMON" - } - ], - "type": "WORK_OF_ART", - "name": "book", - "metadata": {} - }, - { - "salience": 0.18305534, - "mentions": [ - { - "text": { - "content": "guy", - "beginOffset": 34 - }, - "type": "COMMON" - } - ], - "type": "PERSON", - "name": "guy", - "metadata": {} - }, - { - "salience": 0.086402282, - "mentions": [ - { - "text": { - "content": "Mark Twain", - "beginOffset": 47 - }, - "type": "PROPER" - } - ], - "type": "PERSON", - "name": "Mark Twain", - "metadata": { - "mid": "/m/014635", - "wikipedia_url": "http://en.wikipedia.org/wiki/Mark_Twain" - } - } - ], - "language": "en" -} -``` - -* Example2: - -```sh -$ python analyze.py entities "Apple has launched new iPhone." -``` - -You will see something like the following returned: - -``` -{ - "entities": [ - { - "salience": 0.72550339, - "mentions": [ - { - "text": { - "content": "Apple", - "beginOffset": 0 - }, - "type": "PROPER" - } - ], - "type": "ORGANIZATION", - "name": "Apple", - "metadata": { - "mid": "/m/0k8z", - "wikipedia_url": "http://en.wikipedia.org/wiki/Apple_Inc." - } - }, - { - "salience": 0.27449661, - "mentions": [ - { - "text": { - "content": "iPhone", - "beginOffset": 23 - }, - "type": "PROPER" - } - ], - "type": "CONSUMER_GOOD", - "name": "iPhone", - "metadata": { - "mid": "/m/027lnzs", - "wikipedia_url": "http://en.wikipedia.org/wiki/IPhone" - } - } - ], - "language": "en" -} -``` From 838c5203e8df8657b958b20e2ed499ec3b255cf4 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Mon, 14 Nov 2016 11:40:20 -0800 Subject: [PATCH 037/323] removed discovery service with public discovery --- language/snippets/api/analyze.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/language/snippets/api/analyze.py b/language/snippets/api/analyze.py index 1e18a48764f1..707398280d4a 100644 --- a/language/snippets/api/analyze.py +++ b/language/snippets/api/analyze.py @@ -25,21 +25,15 @@ from oauth2client.client import GoogleCredentials -# TODO REMOVE - when discovery is public -DISCOVERY_URL = ('https://language.googleapis.com/$discovery/rest?' 
- 'version=v1&labels=GOOGLE_INTERNAL') - - def get_service(): credentials = GoogleCredentials.get_application_default() scoped_credentials = credentials.create_scoped( ['https://www.googleapis.com/auth/cloud-platform']) http = httplib2.Http() scoped_credentials.authorize(http) - # TODO Change to credentials=credentials return discovery.build('language', 'v1', http=http, - discoveryServiceUrl=DISCOVERY_URL) + credentials=credentials) def get_native_encoding_type(): From 89da839b8a49c06c5a3db945d805abad63278935 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Mon, 14 Nov 2016 17:36:27 -0800 Subject: [PATCH 038/323] Movie sample changes --- language/snippets/movie_nl/main.py | 70 +++++-------------- language/snippets/movie_nl/main_test.py | 6 +- language/snippets/ocr_nl/main.py | 6 +- .../snippets/sentiment/sentiment_analysis.py | 20 ++++-- .../sentiment/sentiment_analysis_test.py | 14 ++-- 5 files changed, 48 insertions(+), 68 deletions(-) diff --git a/language/snippets/movie_nl/main.py b/language/snippets/movie_nl/main.py index ba5c63b60b98..d6ef5d169a51 100644 --- a/language/snippets/movie_nl/main.py +++ b/language/snippets/movie_nl/main.py @@ -21,6 +21,7 @@ from googleapiclient import discovery from googleapiclient.errors import HttpError +import httplib2 from oauth2client.client import GoogleCredentials import requests @@ -30,10 +31,7 @@ def analyze_document(service, document): the movie name.""" logging.info('Analyzing {}'.format(document.doc_id)) - sentences, entities = document.extract_all_sentences(service) - - sentiments = [get_sentiment(service, sentence) for sentence in sentences] - + sentiments, entities = document.extract_sentiment_entities(service) return sentiments, entities @@ -56,29 +54,6 @@ def get_request_body(text, syntax=True, entities=True, sentiment=True): return body -def get_sentiment(service, sentence): - """Get the sentence-level sentiment.""" - body = get_request_body( - sentence, syntax=False, entities=True, sentiment=True) - - docs = service.documents() - request = docs.annotateText(body=body) - - response = request.execute(num_retries=3) - - sentiment = response.get('documentSentiment') - - if sentiment is None: - return (None, None) - else: - pol = sentiment.get('polarity') - mag = sentiment.get('magnitude') - - if pol is None and mag is not None: - pol = 0 - return (pol, mag) - - class Document(object): """Document class captures a single document of movie reviews.""" @@ -86,32 +61,28 @@ def __init__(self, text, doc_id, doc_path): self.text = text self.doc_id = doc_id self.doc_path = doc_path - self.sentence_entity_pair = None + self.sentiment_entity_pair = None self.label = None - def extract_all_sentences(self, service): + def extract_sentiment_entities(self, service): """Extract the sentences in a document.""" - if self.sentence_entity_pair is not None: + if self.sentiment_entity_pair is not None: return self.sentence_entity_pair docs = service.documents() request_body = get_request_body( self.text, - syntax=True, + syntax=False, entities=True, - sentiment=False) + sentiment=True) request = docs.annotateText(body=request_body) ent_list = [] response = request.execute() entities = response.get('entities', []) - sentences = response.get('sentences', []) - - sent_list = [ - sentence.get('text', {}).get('content') for sentence in sentences - ] + documentSentiment = response.get('documentSentiment', {}) for entity in entities: ent_type = entity.get('type') @@ -120,9 +91,9 @@ def extract_all_sentences(self, service): if ent_type == 'PERSON' and wiki_url is not 
None: ent_list.append(wiki_url) - self.sentence_entity_pair = (sent_list, ent_list) + self.sentiment_entity_pair = (documentSentiment, ent_list) - return self.sentence_entity_pair + return self.sentiment_entity_pair def to_sentiment_json(doc_id, sent, label): @@ -200,18 +171,9 @@ def get_sentiment_entities(service, document): """ sentiments, entities = analyze_document(service, document) + score = sentiments.get('score') - sentiments = [sent for sent in sentiments if sent[0] is not None] - negative_sentiments = [ - polarity for polarity, magnitude in sentiments if polarity < 0.0] - positive_sentiments = [ - polarity for polarity, magnitude in sentiments if polarity > 0.0] - - negative = sum(negative_sentiments) - positive = sum(positive_sentiments) - total = positive + negative - - return (total, entities) + return (score, entities) def get_sentiment_label(sentiment): @@ -318,8 +280,12 @@ def get_service(): """Build a client to the Google Cloud Natural Language API.""" credentials = GoogleCredentials.get_application_default() - - return discovery.build('language', 'v1beta1', + scoped_credentials = credentials.create_scoped( + ['https://www.googleapis.com/auth/cloud-platform']) + http = httplib2.Http() + scoped_credentials.authorize(http) + return discovery.build('language', 'v1', + http=http, credentials=credentials) diff --git a/language/snippets/movie_nl/main_test.py b/language/snippets/movie_nl/main_test.py index 8e22a1da34e7..74c62eb382ae 100644 --- a/language/snippets/movie_nl/main_test.py +++ b/language/snippets/movie_nl/main_test.py @@ -69,10 +69,10 @@ def test_process_movie_reviews(): entities = [json.loads(entity) for entity in entities] # assert sentiments - assert sentiments[0].get('sentiment') == 1.0 + assert sentiments[0].get('sentiment') == 0.9 assert sentiments[0].get('label') == 1 - assert sentiments[1].get('sentiment') == 1.0 + assert sentiments[1].get('sentiment') == 0.9 assert sentiments[1].get('label') == 1 # assert entities @@ -80,7 +80,7 @@ def test_process_movie_reviews(): assert entities[0].get('name') == 'Tom Cruise' assert (entities[0].get('wiki_url') == 'http://en.wikipedia.org/wiki/Tom_Cruise') - assert entities[0].get('sentiment') == 2.0 + assert entities[0].get('sentiment') == 1.8 def test_rank_positive_entities(capsys): diff --git a/language/snippets/ocr_nl/main.py b/language/snippets/ocr_nl/main.py index 6e329f53386e..03fbdf9d4585 100755 --- a/language/snippets/ocr_nl/main.py +++ b/language/snippets/ocr_nl/main.py @@ -115,10 +115,12 @@ class TextAnalyzer(object): def __init__(self, db_filename=None): credentials = GoogleCredentials.get_application_default() scoped_credentials = credentials.create_scoped( - ['https://www.googleapis.com/auth/cloud-platform']) + ['https://www.googleapis.com/auth/cloud-platform']) http = httplib2.Http() scoped_credentials.authorize(http) - self.service = discovery.build('language', 'v1beta1', http=http) + self.service = discovery.build('language', 'v1', + http=http, + credentials=credentials) # This list will store the entity information gleaned from the # image files. 
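
The heart of this commit is `extract_sentiment_entities` in movie_nl/main.py above: a single `annotateText` round trip now yields both the document-level score and the entity list, replacing the earlier per-sentence sentiment loop. Stripped of the class machinery, the shape of that call is roughly the following (a sketch against the v1 REST surface, reusing the request-body layout of `get_request_body`; it assumes application-default credentials are configured):

```python
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials


def score_and_people(text):
    """One annotateText call: document sentiment plus PERSON entities."""
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('language', 'v1', credentials=credentials)
    body = {
        'document': {'type': 'PLAIN_TEXT', 'content': text},
        'features': {'extract_entities': True,
                     'extract_document_sentiment': True},
        'encoding_type': 'UTF32',
    }
    response = service.documents().annotateText(body=body).execute()
    score = response.get('documentSentiment', {}).get('score')
    # Keep only people that carry a Wikipedia URL, as main.py does.
    people = [entity['metadata']['wikipedia_url']
              for entity in response.get('entities', [])
              if entity.get('type') == 'PERSON'
              and entity.get('metadata', {}).get('wikipedia_url')]
    return score, people
```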
diff --git a/language/snippets/sentiment/sentiment_analysis.py b/language/snippets/sentiment/sentiment_analysis.py index 8e250881305a..31a8b88c0c0a 100644 --- a/language/snippets/sentiment/sentiment_analysis.py +++ b/language/snippets/sentiment/sentiment_analysis.py @@ -22,7 +22,7 @@ def main(movie_review_filename): '''Run a sentiment analysis request on text within a passed filename.''' credentials = GoogleCredentials.get_application_default() - service = discovery.build('language', 'v1beta1', credentials=credentials) + service = discovery.build('language', 'v1', credentials=credentials) with open(movie_review_filename, 'r') as review_file: service_request = service.documents().analyzeSentiment( @@ -35,11 +35,23 @@ def main(movie_review_filename): ) response = service_request.execute() - polarity = response['documentSentiment']['polarity'] + score = response['documentSentiment']['score'] magnitude = response['documentSentiment']['magnitude'] - print('Sentiment: polarity of {} with magnitude of {}'.format( - polarity, magnitude)) + for i, sentence in enumerate(response['sentences']): + sentence_sentiment = sentence['sentiment']['score'] + print('Sentence {} has a sentiment score of {}'.format( + i, + sentence_sentiment)) + + print('Overall Sentiment: score of {} with magnitude of {}'.format( + score, + magnitude) + ) + return 0 + + print('Sentiment: score of {} with magnitude of {}'.format( + score, magnitude)) return 0 diff --git a/language/snippets/sentiment/sentiment_analysis_test.py b/language/snippets/sentiment/sentiment_analysis_test.py index d6b6a7abfea7..ff28211944e4 100644 --- a/language/snippets/sentiment/sentiment_analysis_test.py +++ b/language/snippets/sentiment/sentiment_analysis_test.py @@ -18,25 +18,25 @@ def test_pos(resource, capsys): main(resource('pos.txt')) out, err = capsys.readouterr() - polarity = float(re.search('polarity of (.+?) with', out).group(1)) + score = float(re.search('score of (.+?) with', out).group(1)) magnitude = float(re.search('magnitude of (.+?)', out).group(1)) - assert polarity * magnitude > 0 + assert score * magnitude > 0 def test_neg(resource, capsys): main(resource('neg.txt')) out, err = capsys.readouterr() - polarity = float(re.search('polarity of (.+?) with', out).group(1)) + score = float(re.search('score of (.+?) with', out).group(1)) magnitude = float(re.search('magnitude of (.+?)', out).group(1)) - assert polarity * magnitude < 0 + assert score * magnitude < 0 def test_mixed(resource, capsys): main(resource('mixed.txt')) out, err = capsys.readouterr() - polarity = float(re.search('polarity of (.+?) with', out).group(1)) - assert polarity <= 0.3 - assert polarity >= -0.3 + score = float(re.search('score of (.+?) with', out).group(1)) + assert score <= 0.3 + assert score >= -0.3 def test_neutral(resource, capsys): From 8fda21326ce6b1d96cb84324ccdce3244624df4e Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Mon, 14 Nov 2016 17:37:35 -0800 Subject: [PATCH 039/323] Updating language requirements. 
Change-Id: Ic08400df1c1f2440c46a845ee46e7674dc5e8fd5 --- language/snippets/cloud-client/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/cloud-client/requirements.txt b/language/snippets/cloud-client/requirements.txt index cc966c0ec05f..ce34e7df6b81 100644 --- a/language/snippets/cloud-client/requirements.txt +++ b/language/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.20.0 +google-cloud-language==0.21.0 From fee08f30c0c600eb8aef38ed31c40a2459c478ed Mon Sep 17 00:00:00 2001 From: Phil Fritzsche Date: Mon, 14 Nov 2016 17:51:31 -0800 Subject: [PATCH 040/323] Update NL sentiment analysis readme to use score --- language/snippets/sentiment/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/sentiment/README.md b/language/snippets/sentiment/README.md index e77cdf16bef3..064b8f94bd24 100644 --- a/language/snippets/sentiment/README.md +++ b/language/snippets/sentiment/README.md @@ -44,5 +44,5 @@ pass your own text files.) ``` (env)$ python sentiment_analysis.py textfile.txt -Sentiment: polarity of -0.1 with magnitude of 6.7 +Sentiment: score of -0.1 with magnitude of 6.7 ``` From 5313737c87d6f80f84ac630fa2ef65f850ddabce Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Mon, 14 Nov 2016 21:45:47 -0800 Subject: [PATCH 041/323] fixed discovery build by removing http --- language/snippets/api/analyze.py | 6 ------ language/snippets/movie_nl/main.py | 6 ------ 2 files changed, 12 deletions(-) diff --git a/language/snippets/api/analyze.py b/language/snippets/api/analyze.py index 707398280d4a..ab72208a289a 100644 --- a/language/snippets/api/analyze.py +++ b/language/snippets/api/analyze.py @@ -21,18 +21,12 @@ import sys from googleapiclient import discovery -import httplib2 from oauth2client.client import GoogleCredentials def get_service(): credentials = GoogleCredentials.get_application_default() - scoped_credentials = credentials.create_scoped( - ['https://www.googleapis.com/auth/cloud-platform']) - http = httplib2.Http() - scoped_credentials.authorize(http) return discovery.build('language', 'v1', - http=http, credentials=credentials) diff --git a/language/snippets/movie_nl/main.py b/language/snippets/movie_nl/main.py index d6ef5d169a51..6d21f4bf6e51 100644 --- a/language/snippets/movie_nl/main.py +++ b/language/snippets/movie_nl/main.py @@ -21,7 +21,6 @@ from googleapiclient import discovery from googleapiclient.errors import HttpError -import httplib2 from oauth2client.client import GoogleCredentials import requests @@ -280,12 +279,7 @@ def get_service(): """Build a client to the Google Cloud Natural Language API.""" credentials = GoogleCredentials.get_application_default() - scoped_credentials = credentials.create_scoped( - ['https://www.googleapis.com/auth/cloud-platform']) - http = httplib2.Http() - scoped_credentials.authorize(http) return discovery.build('language', 'v1', - http=http, credentials=credentials) From 23ee2e10e4f667b427ad2fcbdf1cc229bef8080d Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 15 Nov 2016 14:58:27 -0800 Subject: [PATCH 042/323] Update samples to support latest Google Cloud Python [(#656)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/656) --- language/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 7a0de85468fa..f552f7468eb6 100644 --- 
a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.5.5 -requests==2.11.1 +requests==2.12.0 From 194dfc083d441d93a2df92a7239560175ead2ec9 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 15 Nov 2016 15:05:13 -0800 Subject: [PATCH 043/323] Update readmes Change-Id: Ie385fd8105325c6f2754b737e0f11c84254bcb47 --- language/snippets/api/README.rst | 2 +- language/snippets/cloud-client/README.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/language/snippets/api/README.rst b/language/snippets/api/README.rst index 369e2f4e9db3..301fed0e07d4 100644 --- a/language/snippets/api/README.rst +++ b/language/snippets/api/README.rst @@ -95,4 +95,4 @@ To run this sample: -.. _Google Cloud SDK: https://cloud.google.com/sdk/ +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/language/snippets/cloud-client/README.rst b/language/snippets/cloud-client/README.rst index d8ba578d0098..a0259ce9e866 100644 --- a/language/snippets/cloud-client/README.rst +++ b/language/snippets/cloud-client/README.rst @@ -99,4 +99,4 @@ to `browse the source`_ and `report issues`_. https://github.com/GoogleCloudPlatform/google-cloud-python/issues -.. _Google Cloud SDK: https://cloud.google.com/sdk/ +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file From 4b962383ebb555409d718570d9e7dce7b2e67b5e Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 16 Nov 2016 10:39:05 -0800 Subject: [PATCH 044/323] Fix flaky NL test Change-Id: I064c59e0c4f6d9b5ff0c888353df860dc344f74b --- language/snippets/ocr_nl/main_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/ocr_nl/main_test.py b/language/snippets/ocr_nl/main_test.py index d3d6d6a5e21e..e5a9962e1e25 100755 --- a/language/snippets/ocr_nl/main_test.py +++ b/language/snippets/ocr_nl/main_test.py @@ -59,7 +59,7 @@ def test_text_returns_entities(): text = "Holmes and Watson walked to the cafe." text_analyzer = main.TextAnalyzer() entities = text_analyzer.nl_detect(text) - assert len(entities) == 2 + assert entities etype, ename, salience, wurl = text_analyzer.extract_entity_info( entities[0]) assert ename == 'holmes' From d6912e9825829410e1a44dd8582b76975cd8a456 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 21 Nov 2016 10:00:49 -0800 Subject: [PATCH 045/323] Auto-update dependencies. 
[(#673)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/673) --- language/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index f552f7468eb6..7e62725a1a23 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.5.5 -requests==2.12.0 +requests==2.12.1 From c02d64a08f18ea94efcac88126afbce82c61189a Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 30 Nov 2016 10:42:14 -0800 Subject: [PATCH 046/323] Fix more lint issues Change-Id: I49d4f063d210629346d8d8390c9eaec261c4e519 --- language/snippets/sentiment/sentiment_analysis.py | 12 +++++------- .../snippets/sentiment/sentiment_analysis_test.py | 1 + 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/language/snippets/sentiment/sentiment_analysis.py b/language/snippets/sentiment/sentiment_analysis.py index 31a8b88c0c0a..6f92fc66567e 100644 --- a/language/snippets/sentiment/sentiment_analysis.py +++ b/language/snippets/sentiment/sentiment_analysis.py @@ -11,15 +11,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Demonstrates how to make a simple call to the Natural Language API''' +"""Demonstrates how to make a simple call to the Natural Language API.""" import argparse + from googleapiclient import discovery from oauth2client.client import GoogleCredentials def main(movie_review_filename): - '''Run a sentiment analysis request on text within a passed filename.''' + """Run a sentiment analysis request on text within a passed filename.""" credentials = GoogleCredentials.get_application_default() service = discovery.build('language', 'v1', credentials=credentials) @@ -41,13 +42,10 @@ def main(movie_review_filename): for i, sentence in enumerate(response['sentences']): sentence_sentiment = sentence['sentiment']['score'] print('Sentence {} has a sentiment score of {}'.format( - i, - sentence_sentiment)) + i, sentence_sentiment)) print('Overall Sentiment: score of {} with magnitude of {}'.format( - score, - magnitude) - ) + score, magnitude)) return 0 print('Sentiment: score of {} with magnitude of {}'.format( diff --git a/language/snippets/sentiment/sentiment_analysis_test.py b/language/snippets/sentiment/sentiment_analysis_test.py index ff28211944e4..7ba1d1446f7a 100644 --- a/language/snippets/sentiment/sentiment_analysis_test.py +++ b/language/snippets/sentiment/sentiment_analysis_test.py @@ -12,6 +12,7 @@ # limitations under the License. import re + from sentiment_analysis import main From f46174001c1a00b2962aaa435839a37cfd8d3069 Mon Sep 17 00:00:00 2001 From: Jason Dobry Date: Tue, 6 Dec 2016 13:20:19 -0800 Subject: [PATCH 047/323] Add Cloud Client NL API samples. 
[(#668)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/668) --- language/snippets/cloud-client/README.rst.in | 5 +- language/snippets/cloud-client/quickstart.py | 2 +- .../snippets/cloud-client/requirements.txt | 2 +- .../snippets/cloud-client/resources/text.txt | 1 + language/snippets/cloud-client/snippets.py | 172 ++++++++++++++++++ .../snippets/cloud-client/snippets_test.py | 60 ++++++ 6 files changed, 239 insertions(+), 3 deletions(-) create mode 100644 language/snippets/cloud-client/resources/text.txt create mode 100644 language/snippets/cloud-client/snippets.py create mode 100644 language/snippets/cloud-client/snippets_test.py diff --git a/language/snippets/cloud-client/README.rst.in b/language/snippets/cloud-client/README.rst.in index 78da29111a06..faf402bfe9c0 100644 --- a/language/snippets/cloud-client/README.rst.in +++ b/language/snippets/cloud-client/README.rst.in @@ -4,7 +4,7 @@ product: name: Google Cloud Natural Language API short_name: Cloud Natural Language API url: https://cloud.google.com/natural-language/docs/ - description: > + description: > The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger @@ -17,5 +17,8 @@ setup: samples: - name: Quickstart file: quickstart.py +- name: Snippets + file: snippets.py + show_help: true cloud_client_library: true diff --git a/language/snippets/cloud-client/quickstart.py b/language/snippets/cloud-client/quickstart.py index 24f2ff4dea1f..3b42ac65ab67 100644 --- a/language/snippets/cloud-client/quickstart.py +++ b/language/snippets/cloud-client/quickstart.py @@ -31,7 +31,7 @@ def run_quickstart(): sentiment = document.analyze_sentiment() print('Text: {}'.format(text)) - print('Sentiment: {}, {}'.format(sentiment.polarity, sentiment.magnitude)) + print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) # [END language_quickstart] diff --git a/language/snippets/cloud-client/requirements.txt b/language/snippets/cloud-client/requirements.txt index ce34e7df6b81..130d1cc79a4f 100644 --- a/language/snippets/cloud-client/requirements.txt +++ b/language/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.21.0 +google-cloud-language==0.22.0 diff --git a/language/snippets/cloud-client/resources/text.txt b/language/snippets/cloud-client/resources/text.txt new file mode 100644 index 000000000000..97a1cea02b7a --- /dev/null +++ b/language/snippets/cloud-client/resources/text.txt @@ -0,0 +1 @@ +President Obama is speaking at the White House. \ No newline at end of file diff --git a/language/snippets/cloud-client/snippets.py b/language/snippets/cloud-client/snippets.py new file mode 100644 index 000000000000..c0f5f8a37226 --- /dev/null +++ b/language/snippets/cloud-client/snippets.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python + +# Copyright 2016 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
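# A minimal sketch of the quickstart.py change above, assuming the
# google-cloud-language==0.22.0 pin from requirements.txt: the sentiment
# result now exposes `score` and `magnitude` in place of the old
# `polarity` attribute. The sample text is illustrative only.
from google.cloud import language

client = language.Client()
document = client.document_from_text(u'Jogging is fun.')
sentiment = document.analyze_sentiment()
print(sentiment.score, sentiment.magnitude)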
+ +"""This application demonstrates how to perform basic operations with the +Google Cloud Natural Language API + +For more information, the documentation at +https://cloud.google.com/natural-language/docs. +""" + +import argparse + +from google.cloud import language + + +def sentiment_text(text): + """Detects sentiment in the text.""" + language_client = language.Client() + + # Instantiates a plain text document. + document = language_client.document_from_text(text) + + # Detects sentiment in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + sentiment = document.analyze_sentiment() + + print('Score: {}'.format(sentiment.score)) + print('Magnitude: {}'.format(sentiment.magnitude)) + + +def sentiment_file(gcs_uri): + """Detects sentiment in the file located in Google Cloud Storage.""" + language_client = language.Client() + + # Instantiates a plain text document. + document = language_client.document_from_url(gcs_uri) + + # Detects sentiment in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + sentiment = document.analyze_sentiment() + + print('Score: {}'.format(sentiment.score)) + print('Magnitude: {}'.format(sentiment.magnitude)) + + +def entities_text(text): + """Detects entities in the text.""" + language_client = language.Client() + + # Instantiates a plain text document. + document = language_client.document_from_text(text) + + # Detects entities in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + entities = document.analyze_entities() + + for entity in entities: + print('=' * 20) + print('{:<16}: {}'.format('name', entity.name)) + print('{:<16}: {}'.format('type', entity.entity_type)) + print('{:<16}: {}'.format('wikipedia_url', entity.wikipedia_url)) + print('{:<16}: {}'.format('metadata', entity.metadata)) + print('{:<16}: {}'.format('salience', entity.salience)) + + +def entities_file(gcs_uri): + """Detects entities in the file located in Google Cloud Storage.""" + language_client = language.Client() + + # Instantiates a plain text document. + document = language_client.document_from_url(gcs_uri) + + # Detects sentiment in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + entities = document.analyze_entities() + + for entity in entities: + print('=' * 20) + print('{:<16}: {}'.format('name', entity.name)) + print('{:<16}: {}'.format('type', entity.entity_type)) + print('{:<16}: {}'.format('wikipedia_url', entity.wikipedia_url)) + print('{:<16}: {}'.format('metadata', entity.metadata)) + print('{:<16}: {}'.format('salience', entity.salience)) + + +def syntax_text(text): + """Detects syntax in the text.""" + language_client = language.Client() + + # Instantiates a plain text document. + document = language_client.document_from_text(text) + + # Detects syntax in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + tokens = document.analyze_syntax() + + for token in tokens: + print('{}: {}'.format(token.part_of_speech, token.text_content)) + + +def syntax_file(gcs_uri): + """Detects syntax in the file located in Google Cloud Storage.""" + language_client = language.Client() + + # Instantiates a plain text document. + document = language_client.document_from_url(gcs_uri) + + # Detects syntax in the document. 
You can also analyze HTML with: + # document.doc_type == language.Document.HTML + tokens = document.analyze_syntax() + + for token in tokens: + print('{}: {}'.format(token.part_of_speech, token.text_content)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + + sentiment_text_parser = subparsers.add_parser( + 'sentiment-text', help=sentiment_text.__doc__) + sentiment_text_parser.add_argument('text') + + sentiment_file_parser = subparsers.add_parser( + 'sentiment-file', help=sentiment_file.__doc__) + sentiment_file_parser.add_argument('gcs_uri') + + entities_text_parser = subparsers.add_parser( + 'entities-text', help=entities_text.__doc__) + entities_text_parser.add_argument('text') + + entities_file_parser = subparsers.add_parser( + 'entities-file', help=entities_file.__doc__) + entities_file_parser.add_argument('gcs_uri') + + syntax_text_parser = subparsers.add_parser( + 'syntax-text', help=syntax_text.__doc__) + syntax_text_parser.add_argument('text') + + syntax_file_parser = subparsers.add_parser( + 'syntax-file', help=syntax_file.__doc__) + syntax_file_parser.add_argument('gcs_uri') + + args = parser.parse_args() + + if args.command == 'sentiment-text': + sentiment_text(args.text) + elif args.command == 'sentiment-file': + sentiment_file(args.gcs_uri) + elif args.command == 'entities-text': + entities_text(args.text) + elif args.command == 'entities-file': + entities_file(args.gcs_uri) + elif args.command == 'syntax-text': + syntax_text(args.text) + elif args.command == 'syntax-file': + syntax_file(args.gcs_uri) diff --git a/language/snippets/cloud-client/snippets_test.py b/language/snippets/cloud-client/snippets_test.py new file mode 100644 index 000000000000..47050e44e23c --- /dev/null +++ b/language/snippets/cloud-client/snippets_test.py @@ -0,0 +1,60 @@ +# Copyright 2016 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
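# A hedged usage sketch for the snippets module added above; the tests
# below drive it the same way. The sentence is sample input only, and
# working Cloud credentials are assumed to be configured.
import snippets

snippets.sentiment_text('President Obama is speaking at the White House.')
snippets.entities_text('President Obama is speaking at the White House.')
snippets.syntax_text('President Obama is speaking at the White House.')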
+ + +import snippets + + +def test_sentiment_text(cloud_config, capsys): + snippets.sentiment_text('President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'Score: 0.2' in out + + +def test_sentiment_file(cloud_config, capsys): + cloud_storage_input_uri = 'gs://{}/text.txt'.format( + cloud_config.storage_bucket) + snippets.sentiment_file(cloud_storage_input_uri) + out, _ = capsys.readouterr() + assert 'Score: 0.2' in out + + +def test_entities_text(cloud_config, capsys): + snippets.entities_text('President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'name' in out + assert ': Obama' in out + + +def test_entities_file(cloud_config, capsys): + cloud_storage_input_uri = 'gs://{}/text.txt'.format( + cloud_config.storage_bucket) + snippets.entities_file(cloud_storage_input_uri) + out, _ = capsys.readouterr() + assert 'name' in out + assert ': Obama' in out + + +def test_syntax_text(cloud_config, capsys): + snippets.syntax_text('President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'NOUN: President' in out + + +def test_syntax_file(cloud_config, capsys): + cloud_storage_input_uri = 'gs://{}/text.txt'.format( + cloud_config.storage_bucket) + snippets.syntax_file(cloud_storage_input_uri) + out, _ = capsys.readouterr() + assert 'NOUN: President' in out From cf4bffad0b3e40d927b215e498baeadcfec50833 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 13 Dec 2016 09:54:02 -0800 Subject: [PATCH 048/323] Auto-update dependencies. [(#715)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/715) --- language/snippets/cloud-client/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/language/snippets/cloud-client/requirements.txt b/language/snippets/cloud-client/requirements.txt index 130d1cc79a4f..69287a660f22 100644 --- a/language/snippets/cloud-client/requirements.txt +++ b/language/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.22.0 +google-cloud-language==0.22.1 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 7e62725a1a23..a915b32fce12 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.5.5 -requests==2.12.1 +requests==2.12.3 From e08be630e70ec0c7fe52d02bd79d12c119e241aa Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 15 Dec 2016 10:02:03 -0800 Subject: [PATCH 049/323] Auto-update dependencies. 
[(#718)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/718) --- language/snippets/cloud-client/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/language/snippets/cloud-client/requirements.txt b/language/snippets/cloud-client/requirements.txt index 69287a660f22..afd4c94e7f9c 100644 --- a/language/snippets/cloud-client/requirements.txt +++ b/language/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.22.1 +google-cloud-language==0.22.2 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index a915b32fce12..17841fcaa2dc 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.5.5 -requests==2.12.3 +requests==2.12.4 From de1ec350a4f380047cf1c9292bd3b43a2858c9d6 Mon Sep 17 00:00:00 2001 From: Jason Dobry Date: Thu, 15 Dec 2016 11:05:38 -0800 Subject: [PATCH 050/323] Refactored the Sentiment Analysis tutorial to use the Cloud Client Library. [(#713)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/713) --- language/snippets/sentiment/requirements.txt | 2 +- .../snippets/sentiment/sentiment_analysis.py | 55 ++++++++++--------- .../sentiment/sentiment_analysis_test.py | 10 ++-- 3 files changed, 36 insertions(+), 31 deletions(-) diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 2cd2a1334ea0..afd4c94e7f9c 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.5 +google-cloud-language==0.22.2 diff --git a/language/snippets/sentiment/sentiment_analysis.py b/language/snippets/sentiment/sentiment_analysis.py index 6f92fc66567e..c574c31827be 100644 --- a/language/snippets/sentiment/sentiment_analysis.py +++ b/language/snippets/sentiment/sentiment_analysis.py @@ -11,38 +11,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
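# The shape of the refactor below, sketched minimally: the REST request
# built with googleapiclient.discovery gives way to the
# google-cloud-language client (0.22.2 per the requirements.txt change
# above), and a single annotate_text() call returns sentence-level
# sentiment. The review text here is a stand-in.
from google.cloud import language

client = language.Client()
document = client.document_from_html('What a terrific movie!')
annotations = document.annotate_text(include_sentiment=True,
                                     include_syntax=False,
                                     include_entities=False)
for index, sentence in enumerate(annotations.sentences):
    print(index, sentence.sentiment.score)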
+# [START sentiment_tutorial] """Demonstrates how to make a simple call to the Natural Language API.""" +# [START sentiment_tutorial_import] import argparse -from googleapiclient import discovery -from oauth2client.client import GoogleCredentials +from google.cloud import language +# [END sentiment_tutorial_import] -def main(movie_review_filename): - """Run a sentiment analysis request on text within a passed filename.""" - - credentials = GoogleCredentials.get_application_default() - service = discovery.build('language', 'v1', credentials=credentials) +def print_result(annotations): + score = annotations.sentiment.score + magnitude = annotations.sentiment.magnitude - with open(movie_review_filename, 'r') as review_file: - service_request = service.documents().analyzeSentiment( - body={ - 'document': { - 'type': 'PLAIN_TEXT', - 'content': review_file.read(), - } - } - ) - response = service_request.execute() - - score = response['documentSentiment']['score'] - magnitude = response['documentSentiment']['magnitude'] - - for i, sentence in enumerate(response['sentences']): - sentence_sentiment = sentence['sentiment']['score'] + for index, sentence in enumerate(annotations.sentences): + sentence_sentiment = sentence.sentiment.score print('Sentence {} has a sentiment score of {}'.format( - i, sentence_sentiment)) + index, sentence_sentiment)) print('Overall Sentiment: score of {} with magnitude of {}'.format( score, magnitude)) @@ -53,6 +39,23 @@ def main(movie_review_filename): return 0 +def analyze(movie_review_filename): + """Run a sentiment analysis request on text within a passed filename.""" + language_client = language.Client() + + with open(movie_review_filename, 'r') as review_file: + # Instantiates a plain text document. + document = language_client.document_from_html(review_file.read()) + + # Detects sentiment in the document. + annotations = document.annotate_text(include_sentiment=True, + include_syntax=False, + include_entities=False) + + # Print the results + print_result(annotations) + + if __name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__, @@ -61,4 +64,6 @@ def main(movie_review_filename): 'movie_review_filename', help='The filename of the movie review you\'d like to analyze.') args = parser.parse_args() - main(args.movie_review_filename) + + analyze(args.movie_review_filename) +# [END sentiment_tutorial] diff --git a/language/snippets/sentiment/sentiment_analysis_test.py b/language/snippets/sentiment/sentiment_analysis_test.py index 7ba1d1446f7a..19ec86f17d5b 100644 --- a/language/snippets/sentiment/sentiment_analysis_test.py +++ b/language/snippets/sentiment/sentiment_analysis_test.py @@ -13,11 +13,11 @@ import re -from sentiment_analysis import main +from sentiment_analysis import analyze def test_pos(resource, capsys): - main(resource('pos.txt')) + analyze(resource('pos.txt')) out, err = capsys.readouterr() score = float(re.search('score of (.+?) with', out).group(1)) magnitude = float(re.search('magnitude of (.+?)', out).group(1)) @@ -25,7 +25,7 @@ def test_pos(resource, capsys): def test_neg(resource, capsys): - main(resource('neg.txt')) + analyze(resource('neg.txt')) out, err = capsys.readouterr() score = float(re.search('score of (.+?) with', out).group(1)) magnitude = float(re.search('magnitude of (.+?)', out).group(1)) @@ -33,7 +33,7 @@ def test_neg(resource, capsys): def test_mixed(resource, capsys): - main(resource('mixed.txt')) + analyze(resource('mixed.txt')) out, err = capsys.readouterr() score = float(re.search('score of (.+?) 
with', out).group(1)) assert score <= 0.3 @@ -41,7 +41,7 @@ def test_mixed(resource, capsys): def test_neutral(resource, capsys): - main(resource('neutral.txt')) + analyze(resource('neutral.txt')) out, err = capsys.readouterr() magnitude = float(re.search('magnitude of (.+?)', out).group(1)) assert magnitude <= 2.0 From 4975ee7dabf5093da8b90c037a12cda431894cd9 Mon Sep 17 00:00:00 2001 From: Gus Class Date: Tue, 20 Dec 2016 14:27:22 -0800 Subject: [PATCH 051/323] Add snippets and tests for language tutorial. [(#729)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/729) --- language/snippets/tutorial/README.rst.in | 20 ++++++ language/snippets/tutorial/requirements.txt | 1 + .../tutorial/reviews/bladerunner-mixed.txt | 19 +++++ .../tutorial/reviews/bladerunner-neg.txt | 3 + .../tutorial/reviews/bladerunner-neutral.txt | 2 + .../tutorial/reviews/bladerunner-pos.txt | 10 +++ language/snippets/tutorial/tutorial.py | 71 +++++++++++++++++++ language/snippets/tutorial/tutorial_test.py | 51 +++++++++++++ 8 files changed, 177 insertions(+) create mode 100644 language/snippets/tutorial/README.rst.in create mode 100644 language/snippets/tutorial/requirements.txt create mode 100644 language/snippets/tutorial/reviews/bladerunner-mixed.txt create mode 100644 language/snippets/tutorial/reviews/bladerunner-neg.txt create mode 100644 language/snippets/tutorial/reviews/bladerunner-neutral.txt create mode 100644 language/snippets/tutorial/reviews/bladerunner-pos.txt create mode 100644 language/snippets/tutorial/tutorial.py create mode 100644 language/snippets/tutorial/tutorial_test.py diff --git a/language/snippets/tutorial/README.rst.in b/language/snippets/tutorial/README.rst.in new file mode 100644 index 000000000000..aea593b277f4 --- /dev/null +++ b/language/snippets/tutorial/README.rst.in @@ -0,0 +1,20 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Natural Language Tutorial + short_name: Cloud Natural Language Tutorial + url: https://cloud.google.com/natural-language/docs/ + description: > + The `Google Cloud Natural Language API`_ provides natural language + understanding technologies to developers, including sentiment analysis, + entity recognition, and syntax analysis. This API is part of the larger + Cloud Machine Learning API. + +setup: +- auth +- install_deps + +samples: +- name: Language tutorial + file: tutorial.py + show_help: true diff --git a/language/snippets/tutorial/requirements.txt b/language/snippets/tutorial/requirements.txt new file mode 100644 index 000000000000..2cd2a1334ea0 --- /dev/null +++ b/language/snippets/tutorial/requirements.txt @@ -0,0 +1 @@ +google-api-python-client==1.5.5 diff --git a/language/snippets/tutorial/reviews/bladerunner-mixed.txt b/language/snippets/tutorial/reviews/bladerunner-mixed.txt new file mode 100644 index 000000000000..3b520b65a8a7 --- /dev/null +++ b/language/snippets/tutorial/reviews/bladerunner-mixed.txt @@ -0,0 +1,19 @@ +I really wanted to love 'Bladerunner' but ultimately I couldn't get +myself to appreciate it fully. However, you may like it if you're into +science fiction, especially if you're interested in the philosophical +exploration of what it means to be human or machine. Some of the gizmos +like the flying cars and the Vouight-Kampff machine (which seemed very +steampunk), were quite cool. + +I did find the plot pretty slow and but the dialogue and action sequences +were good. 
Unlike most science fiction films, this one was mostly quiet, and +not all that much happened, except during the last 15 minutes. I didn't +understand why a unicorn was in the movie. The visual effects were fantastic, +however, and the musical score and overall mood was quite interesting. +A futurist Los Angeles that was both highly polished and also falling apart +reminded me of 'Outland.' Certainly, the style of the film made up for +many of its pedantic plot holes. + +If you want your sci-fi to be lasers and spaceships, 'Bladerunner' may +disappoint you. But if you want it to make you think, this movie may +be worth the money. \ No newline at end of file diff --git a/language/snippets/tutorial/reviews/bladerunner-neg.txt b/language/snippets/tutorial/reviews/bladerunner-neg.txt new file mode 100644 index 000000000000..dbef76271d16 --- /dev/null +++ b/language/snippets/tutorial/reviews/bladerunner-neg.txt @@ -0,0 +1,3 @@ +What was Hollywood thinking with this movie! I hated, +hated, hated it. BORING! I went afterwards and demanded my money back. +They refused. \ No newline at end of file diff --git a/language/snippets/tutorial/reviews/bladerunner-neutral.txt b/language/snippets/tutorial/reviews/bladerunner-neutral.txt new file mode 100644 index 000000000000..60556e604be9 --- /dev/null +++ b/language/snippets/tutorial/reviews/bladerunner-neutral.txt @@ -0,0 +1,2 @@ +I neither liked nor disliked this movie. Parts were interesting, but +overall I was left wanting more. The acting was pretty good. \ No newline at end of file diff --git a/language/snippets/tutorial/reviews/bladerunner-pos.txt b/language/snippets/tutorial/reviews/bladerunner-pos.txt new file mode 100644 index 000000000000..a7faf81570b3 --- /dev/null +++ b/language/snippets/tutorial/reviews/bladerunner-pos.txt @@ -0,0 +1,10 @@ +`Bladerunner` is often touted as one of the best science fiction films ever +made. Indeed, it satisfies many of the requisites for good sci-fi: a future +world with flying cars and humanoid robots attempting to rebel against their +creators. But more than anything, `Bladerunner` is a fantastic exploration +of the nature of what it means to be human. If we create robots which can +think, will they become human? And if they do, what makes us unique? Indeed, +how can we be sure we're not human in any case? `Bladerunner` explored +these issues before such movies as `The Matrix,' and did so intelligently. +The visual effects and score by Vangelis set the mood. See this movie +in a dark theatre to appreciate it fully. Highly recommended! \ No newline at end of file diff --git a/language/snippets/tutorial/tutorial.py b/language/snippets/tutorial/tutorial.py new file mode 100644 index 000000000000..b2ac2421a5be --- /dev/null +++ b/language/snippets/tutorial/tutorial.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python + +# Copyright 2016 Google, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
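# Once the module below is saved as tutorial.py, a quick smoke run against
# one of the sample reviews added in this commit could look like this
# (path relative to language/snippets/tutorial; credentials assumed):
import tutorial

tutorial.print_sentiment('reviews/bladerunner-pos.txt')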
+# [START full_tutorial_script] +# [START import_libraries] +import argparse +import io + +from googleapiclient import discovery +from oauth2client.client import GoogleCredentials +# [END import_libraries] + + +def print_sentiment(filename): + """Prints sentiment analysis on a given file contents.""" + # [START authenticating_to_the_api] + credentials = GoogleCredentials.get_application_default() + service = discovery.build('language', 'v1', credentials=credentials) + # [END authenticating_to_the_api] + + # [START constructing_the_request] + with io.open(filename, 'r') as review_file: + review_file_contents = review_file.read() + + service_request = service.documents().analyzeSentiment( + body={ + 'document': { + 'type': 'PLAIN_TEXT', + 'content': review_file_contents, + } + } + ) + response = service_request.execute() + # [END constructing_the_request] + + # [START parsing_the_response] + score = response['documentSentiment']['score'] + magnitude = response['documentSentiment']['magnitude'] + + for n, sentence in enumerate(response['sentences']): + sentence_sentiment = sentence['sentiment']['score'] + print('Sentence {} has a sentiment score of {}'.format(n, + sentence_sentiment)) + + print('Overall Sentiment: score of {} with magnitude of {}'.format( + score, magnitude)) + # [END parsing_the_response] + + +# [START running_your_application] +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + 'movie_review_filename', + help='The filename of the movie review you\'d like to analyze.') + args = parser.parse_args() + print_sentiment(args.movie_review_filename) +# [END running_your_application] +# [END full_tutorial_script] diff --git a/language/snippets/tutorial/tutorial_test.py b/language/snippets/tutorial/tutorial_test.py new file mode 100644 index 000000000000..065076fb4cc7 --- /dev/null +++ b/language/snippets/tutorial/tutorial_test.py @@ -0,0 +1,51 @@ +# Copyright 2016, Google, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import re + +import tutorial + + +def test_neutral(capsys): + tutorial.print_sentiment('reviews/bladerunner-neutral.txt') + out, _ = capsys.readouterr() + assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) + assert re.search( + r'Overall Sentiment: score of -?[0-2]\.?[0-9]? with ' + r'magnitude of [0-1]\.?[0-9]?', out, re.I) + + +def test_pos(capsys): + tutorial.print_sentiment('reviews/bladerunner-pos.txt') + out, _ = capsys.readouterr() + assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) + assert re.search( + r'Overall Sentiment: score of [0-9]\.?[0-9]? with ' + r'magnitude of [0-9]\.?[0-9]?', out, re.I) + + +def test_neg(capsys): + tutorial.print_sentiment('reviews/bladerunner-neg.txt') + out, _ = capsys.readouterr() + assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) + assert re.search( + r'Overall Sentiment: score of -[0-9]\.?[0-9]? 
with ' + r'magnitude of [2-7]\.?[0-9]?', out, re.I) + + +def test_mixed(capsys): + tutorial.print_sentiment('reviews/bladerunner-mixed.txt') + out, _ = capsys.readouterr() + assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) + assert re.search( + r'Overall Sentiment: score of -?[0-9]\.?[0-9]? with ' + r'magnitude of [3-6]\.?[0-9]?', out, re.I) From af4e48b8cea4ce2bd241c52d0a8f72d5b8d853af Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 12 Jan 2017 12:01:20 -0800 Subject: [PATCH 052/323] Auto-update dependencies. [(#735)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/735) * Auto-update dependencies. * Fix language OCR sample * Remove unused import --- language/snippets/api/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- language/snippets/ocr_nl/main.py | 14 ++------------ language/snippets/ocr_nl/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 2 +- language/snippets/tutorial/requirements.txt | 2 +- 6 files changed, 7 insertions(+), 17 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 2cd2a1334ea0..ce6a9bf5bad7 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.5 +google-api-python-client==1.6.1 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 17841fcaa2dc..5512a881a5b4 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.5.5 +google-api-python-client==1.6.1 requests==2.12.4 diff --git a/language/snippets/ocr_nl/main.py b/language/snippets/ocr_nl/main.py index 03fbdf9d4585..11bb430b95e8 100755 --- a/language/snippets/ocr_nl/main.py +++ b/language/snippets/ocr_nl/main.py @@ -44,8 +44,6 @@ from googleapiclient import discovery from googleapiclient import errors -import httplib2 -from oauth2client.client import GoogleCredentials BATCH_SIZE = 10 @@ -54,8 +52,7 @@ class VisionApi(object): """Construct and use the Cloud Vision API service.""" def __init__(self): - credentials = GoogleCredentials.get_application_default() - self.service = discovery.build('vision', 'v1', credentials=credentials) + self.service = discovery.build('vision', 'v1') def detect_text(self, input_filenames, num_retries=3, max_results=6): """Uses the Vision API to detect text in the given file.""" @@ -113,14 +110,7 @@ class TextAnalyzer(object): """Construct and use the Google Natural Language API service.""" def __init__(self, db_filename=None): - credentials = GoogleCredentials.get_application_default() - scoped_credentials = credentials.create_scoped( - ['https://www.googleapis.com/auth/cloud-platform']) - http = httplib2.Http() - scoped_credentials.authorize(http) - self.service = discovery.build('language', 'v1', - http=http, - credentials=credentials) + self.service = discovery.build('language', 'v1') # This list will store the entity information gleaned from the # image files. 
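The main.py hunks above are the substance of this change: `discovery.build()` now resolves Application Default Credentials on its own, so the explicit GoogleCredentials and httplib2 wiring can be deleted. A minimal sketch of the resulting call path, with illustrative input text:

from googleapiclient import discovery

service = discovery.build('language', 'v1')
body = {
    'document': {
        'type': 'PLAIN_TEXT',
        'content': 'Holmes and Watson walked to the cafe.',
    },
    'encoding_type': 'UTF32',
}
response = service.documents().analyzeEntities(body=body).execute()
print([entity['name'] for entity in response['entities']])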
diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index 2cd2a1334ea0..ce6a9bf5bad7 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.5 +google-api-python-client==1.6.1 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index 2cd2a1334ea0..ce6a9bf5bad7 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.5 +google-api-python-client==1.6.1 diff --git a/language/snippets/tutorial/requirements.txt b/language/snippets/tutorial/requirements.txt index 2cd2a1334ea0..ce6a9bf5bad7 100644 --- a/language/snippets/tutorial/requirements.txt +++ b/language/snippets/tutorial/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.5 +google-api-python-client==1.6.1 From 032b7233b3707159f8882b70023a764a7168b76d Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 31 Jan 2017 14:44:04 -0800 Subject: [PATCH 053/323] Auto-update dependencies. [(#762)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/762) --- language/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 5512a881a5b4..a7730f285b0a 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.1 -requests==2.12.4 +requests==2.13.0 From 6b6f66124aad75fd6270f4f23596e82c7e483578 Mon Sep 17 00:00:00 2001 From: Jerjou Date: Tue, 31 Jan 2017 22:57:26 -0800 Subject: [PATCH 054/323] Update README.md Addresses #769 --- language/snippets/sentiment/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/sentiment/README.md b/language/snippets/sentiment/README.md index 064b8f94bd24..86c75a83f7c8 100644 --- a/language/snippets/sentiment/README.md +++ b/language/snippets/sentiment/README.md @@ -22,7 +22,7 @@ Set up your ## Download the Code ``` -$ git clone https://github.com/GoogleCloudPlatform/python-dev-samples/language/sentiment/ +$ git clone https://github.com/GoogleCloudPlatform/python-dev-samples.git $ cd python-docs-samples/language/sentiment ``` From 336f0b4b2e3312c10c84124710cc1a6f8077c00a Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 9 Feb 2017 08:59:42 -0800 Subject: [PATCH 055/323] Auto-update dependencies. 
[(#790)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/790) --- language/snippets/api/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- language/snippets/ocr_nl/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 2 +- language/snippets/tutorial/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index ce6a9bf5bad7..4f77d6936d70 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.1 +google-api-python-client==1.6.2 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index a7730f285b0a..fe93963b90d1 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.6.1 +google-api-python-client==1.6.2 requests==2.13.0 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index ce6a9bf5bad7..4f77d6936d70 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.1 +google-api-python-client==1.6.2 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index ce6a9bf5bad7..4f77d6936d70 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.1 +google-api-python-client==1.6.2 diff --git a/language/snippets/tutorial/requirements.txt b/language/snippets/tutorial/requirements.txt index ce6a9bf5bad7..4f77d6936d70 100644 --- a/language/snippets/tutorial/requirements.txt +++ b/language/snippets/tutorial/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.1 +google-api-python-client==1.6.2 From 4f63ac3474a526b3f21e4d0bc7e1696c2b69959d Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Thu, 16 Feb 2017 17:07:45 -0800 Subject: [PATCH 056/323] Remove usage of GoogleCredentials [(#810)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/810) --- language/snippets/api/analyze.py | 15 ++++----------- language/snippets/api/analyze_test.py | 15 ++++++++------- language/snippets/movie_nl/main.py | 13 ++----------- language/snippets/movie_nl/main_test.py | 3 ++- language/snippets/ocr_nl/main.py | 12 ++++++------ language/snippets/syntax_triples/main.py | 12 ++---------- language/snippets/tutorial/tutorial.py | 6 ++---- 7 files changed, 26 insertions(+), 50 deletions(-) diff --git a/language/snippets/api/analyze.py b/language/snippets/api/analyze.py index ab72208a289a..a1e702b12cb5 100644 --- a/language/snippets/api/analyze.py +++ b/language/snippets/api/analyze.py @@ -20,14 +20,7 @@ import json import sys -from googleapiclient import discovery -from oauth2client.client import GoogleCredentials - - -def get_service(): - credentials = GoogleCredentials.get_application_default() - return discovery.build('language', 'v1', - credentials=credentials) +import googleapiclient.discovery def get_native_encoding_type(): @@ -47,7 +40,7 @@ def analyze_entities(text, encoding='UTF32'): 'encoding_type': encoding, } - service = get_service() + service = googleapiclient.discovery.build('language', 'v1') request = service.documents().analyzeEntities(body=body) response = request.execute() @@ -64,7 
+57,7 @@ def analyze_sentiment(text, encoding='UTF32'): 'encoding_type': encoding } - service = get_service() + service = googleapiclient.discovery.build('language', 'v1') request = service.documents().analyzeSentiment(body=body) response = request.execute() @@ -81,7 +74,7 @@ def analyze_syntax(text, encoding='UTF32'): 'encoding_type': encoding } - service = get_service() + service = googleapiclient.discovery.build('language', 'v1') request = service.documents().analyzeSyntax(body=body) response = request.execute() diff --git a/language/snippets/api/analyze_test.py b/language/snippets/api/analyze_test.py index 8f024fda6158..08852c33ff73 100644 --- a/language/snippets/api/analyze_test.py +++ b/language/snippets/api/analyze_test.py @@ -249,10 +249,11 @@ def test_annotate_text_utf32_directly_index_into_unicode(): offset = tokens[2]['text'].get('beginOffset', 0) assert test_string[offset] == tokens[2]['text']['content'] - assert tokens[3]['text']['content'] == u'\U0001f636' - offset = tokens[3]['text'].get('beginOffset', 0) - assert test_string[offset] == tokens[3]['text']['content'] - - assert tokens[4]['text']['content'] == u'b' - offset = tokens[4]['text'].get('beginOffset', 0) - assert test_string[offset] == tokens[4]['text']['content'] + # Temporarily disabled + # assert tokens[3]['text']['content'] == u'\U0001f636' + # offset = tokens[3]['text'].get('beginOffset', 0) + # assert test_string[offset] == tokens[3]['text']['content'] + + # assert tokens[4]['text']['content'] == u'b' + # offset = tokens[4]['text'].get('beginOffset', 0) + # assert test_string[offset] == tokens[4]['text']['content'] diff --git a/language/snippets/movie_nl/main.py b/language/snippets/movie_nl/main.py index 6d21f4bf6e51..73e624889298 100644 --- a/language/snippets/movie_nl/main.py +++ b/language/snippets/movie_nl/main.py @@ -19,9 +19,8 @@ import logging import os -from googleapiclient import discovery +import googleapiclient.discovery from googleapiclient.errors import HttpError -from oauth2client.client import GoogleCredentials import requests @@ -275,14 +274,6 @@ def rank_entities(reader, sentiment=None, topn=None, reverse_bool=False): print('\n'.join(items[:topn])) -def get_service(): - """Build a client to the Google Cloud Natural Language API.""" - - credentials = GoogleCredentials.get_application_default() - return discovery.build('language', 'v1', - credentials=credentials) - - def analyze(input_dir, sentiment_writer, entity_writer, sample, log_file): """Analyze the document for sentiment and entities""" @@ -290,7 +281,7 @@ def analyze(input_dir, sentiment_writer, entity_writer, sample, log_file): logging.basicConfig(filename=log_file, level=logging.DEBUG) # Create a Google Service object - service = get_service() + service = googleapiclient.discovery.build('language', 'v1') reader = document_generator(input_dir, sample) diff --git a/language/snippets/movie_nl/main_test.py b/language/snippets/movie_nl/main_test.py index 74c62eb382ae..927639eb2b21 100644 --- a/language/snippets/movie_nl/main_test.py +++ b/language/snippets/movie_nl/main_test.py @@ -14,6 +14,7 @@ import json +import googleapiclient.discovery import six import main @@ -50,7 +51,7 @@ def test_to_sentiment_json(): def test_process_movie_reviews(): - service = main.get_service() + service = googleapiclient.discovery.build('language', 'v1') doc1 = main.Document('Top Gun was awesome and Tom Cruise rocked!', 'doc1', 'doc1') diff --git a/language/snippets/ocr_nl/main.py b/language/snippets/ocr_nl/main.py index 11bb430b95e8..db156054450b 100755 --- 
a/language/snippets/ocr_nl/main.py +++ b/language/snippets/ocr_nl/main.py @@ -42,8 +42,8 @@ import sys import time -from googleapiclient import discovery -from googleapiclient import errors +import googleapiclient.discovery +import googleapiclient.errors BATCH_SIZE = 10 @@ -52,7 +52,7 @@ class VisionApi(object): """Construct and use the Cloud Vision API service.""" def __init__(self): - self.service = discovery.build('vision', 'v1') + self.service = googleapiclient.discovery.build('vision', 'v1') def detect_text(self, input_filenames, num_retries=3, max_results=6): """Uses the Vision API to detect text in the given file.""" @@ -100,7 +100,7 @@ def detect_text(self, input_filenames, num_retries=3, max_results=6): return text_response - except errors.HttpError as e: + except googleapiclient.errors.HttpError as e: logging.error('Http Error for {}: {}'.format(filename, e)) except KeyError as e2: logging.error('Key error: {}'.format(e2)) @@ -110,7 +110,7 @@ class TextAnalyzer(object): """Construct and use the Google Natural Language API service.""" def __init__(self, db_filename=None): - self.service = discovery.build('language', 'v1') + self.service = googleapiclient.discovery.build('language', 'v1') # This list will store the entity information gleaned from the # image files. @@ -143,7 +143,7 @@ def nl_detect(self, text): request = self.service.documents().analyzeEntities(body=body) response = request.execute() entities = response['entities'] - except errors.HttpError as e: + except googleapiclient.errors.HttpError as e: logging.error('Http Error: %s' % e) except KeyError as e2: logging.error('Key error: %s' % e2) diff --git a/language/snippets/syntax_triples/main.py b/language/snippets/syntax_triples/main.py index 1be174bff04c..bbe2386634ed 100644 --- a/language/snippets/syntax_triples/main.py +++ b/language/snippets/syntax_triples/main.py @@ -31,9 +31,7 @@ import sys import textwrap -from googleapiclient import discovery -import httplib2 -from oauth2client.client import GoogleCredentials +import googleapiclient.discovery def dependents(tokens, head_index): @@ -75,13 +73,7 @@ def analyze_syntax(text): the encoding used natively by Python. Raises an errors.HTTPError if there is a connection problem. 
""" - credentials = GoogleCredentials.get_application_default() - scoped_credentials = credentials.create_scoped( - ['https://www.googleapis.com/auth/cloud-platform']) - http = httplib2.Http() - scoped_credentials.authorize(http) - service = discovery.build( - 'language', 'v1beta1', http=http) + service = googleapiclient.discovery.build('language', 'v1beta1') body = { 'document': { 'type': 'PLAIN_TEXT', diff --git a/language/snippets/tutorial/tutorial.py b/language/snippets/tutorial/tutorial.py index b2ac2421a5be..5d14b223e780 100644 --- a/language/snippets/tutorial/tutorial.py +++ b/language/snippets/tutorial/tutorial.py @@ -18,16 +18,14 @@ import argparse import io -from googleapiclient import discovery -from oauth2client.client import GoogleCredentials +import googleapiclient.discovery # [END import_libraries] def print_sentiment(filename): """Prints sentiment analysis on a given file contents.""" # [START authenticating_to_the_api] - credentials = GoogleCredentials.get_application_default() - service = discovery.build('language', 'v1', credentials=credentials) + service = googleapiclient.discovery.build('language', 'v1') # [END authenticating_to_the_api] # [START constructing_the_request] From 0389ec37bb168b1c44901d9d68fc6043b21faf7a Mon Sep 17 00:00:00 2001 From: Gus Class Date: Tue, 28 Feb 2017 11:24:31 -0800 Subject: [PATCH 057/323] Updates client library to version 0.23.0 [(#832)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/832) --- language/snippets/cloud-client/quickstart.py | 2 +- .../snippets/cloud-client/requirements.txt | 2 +- language/snippets/cloud-client/snippets.py | 18 ++++++++++-------- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/language/snippets/cloud-client/quickstart.py b/language/snippets/cloud-client/quickstart.py index 3b42ac65ab67..3fd703a567ee 100644 --- a/language/snippets/cloud-client/quickstart.py +++ b/language/snippets/cloud-client/quickstart.py @@ -28,7 +28,7 @@ def run_quickstart(): document = language_client.document_from_text(text) # Detects the sentiment of the text - sentiment = document.analyze_sentiment() + sentiment = document.analyze_sentiment().sentiment print('Text: {}'.format(text)) print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) diff --git a/language/snippets/cloud-client/requirements.txt b/language/snippets/cloud-client/requirements.txt index afd4c94e7f9c..07685057df83 100644 --- a/language/snippets/cloud-client/requirements.txt +++ b/language/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.22.2 +google-cloud-language==0.23 diff --git a/language/snippets/cloud-client/snippets.py b/language/snippets/cloud-client/snippets.py index c0f5f8a37226..94d1db4a23a7 100644 --- a/language/snippets/cloud-client/snippets.py +++ b/language/snippets/cloud-client/snippets.py @@ -35,7 +35,7 @@ def sentiment_text(text): # Detects sentiment in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML - sentiment = document.analyze_sentiment() + sentiment = document.analyze_sentiment().sentiment print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) @@ -50,7 +50,7 @@ def sentiment_file(gcs_uri): # Detects sentiment in the document. 
You can also analyze HTML with: # document.doc_type == language.Document.HTML - sentiment = document.analyze_sentiment() + sentiment = document.analyze_sentiment().sentiment print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) @@ -65,15 +65,16 @@ def entities_text(text): # Detects entities in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML - entities = document.analyze_entities() + entities = document.analyze_entities().entities for entity in entities: print('=' * 20) print('{:<16}: {}'.format('name', entity.name)) print('{:<16}: {}'.format('type', entity.entity_type)) - print('{:<16}: {}'.format('wikipedia_url', entity.wikipedia_url)) print('{:<16}: {}'.format('metadata', entity.metadata)) print('{:<16}: {}'.format('salience', entity.salience)) + print('{:<16}: {}'.format('wikipedia_url', + entity.metadata.get('wikipedia_url', '-'))) def entities_file(gcs_uri): @@ -85,15 +86,16 @@ def entities_file(gcs_uri): # Detects sentiment in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML - entities = document.analyze_entities() + entities = document.analyze_entities().entities for entity in entities: print('=' * 20) print('{:<16}: {}'.format('name', entity.name)) print('{:<16}: {}'.format('type', entity.entity_type)) - print('{:<16}: {}'.format('wikipedia_url', entity.wikipedia_url)) print('{:<16}: {}'.format('metadata', entity.metadata)) print('{:<16}: {}'.format('salience', entity.salience)) + print('{:<16}: {}'.format('wikipedia_url', + entity.metadata.get('wikipedia_url', '-'))) def syntax_text(text): @@ -105,7 +107,7 @@ def syntax_text(text): # Detects syntax in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML - tokens = document.analyze_syntax() + tokens = document.analyze_syntax().tokens for token in tokens: print('{}: {}'.format(token.part_of_speech, token.text_content)) @@ -120,7 +122,7 @@ def syntax_file(gcs_uri): # Detects syntax in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML - tokens = document.analyze_syntax() + tokens = document.analyze_syntax().tokens for token in tokens: print('{}: {}'.format(token.part_of_speech, token.text_content)) From 8f2678b0401ec787100658ad7b4bff9f32d1da63 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Fri, 10 Mar 2017 21:25:51 -0800 Subject: [PATCH 058/323] Auto-update dependencies. 
[(#825)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/825)
---
 language/snippets/cloud-client/requirements.txt | 2 +-
 language/snippets/sentiment/requirements.txt    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/language/snippets/cloud-client/requirements.txt b/language/snippets/cloud-client/requirements.txt
index 07685057df83..f1dafd7e696d 100644
--- a/language/snippets/cloud-client/requirements.txt
+++ b/language/snippets/cloud-client/requirements.txt
@@ -1 +1 @@
-google-cloud-language==0.23
+google-cloud-language==0.23.1
diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt
index afd4c94e7f9c..f1dafd7e696d 100644
--- a/language/snippets/sentiment/requirements.txt
+++ b/language/snippets/sentiment/requirements.txt
@@ -1 +1 @@
-google-cloud-language==0.22.2
+google-cloud-language==0.23.1

From c8cb0e8bacf69789b242a6312684ddb1350974b5 Mon Sep 17 00:00:00 2001
From: Paul Buser
Date: Mon, 20 Mar 2017 11:14:07 -0700
Subject: [PATCH 059/323] Update README.md
 [(#863)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/863)

Fix the git repository pointed to in the README.
---
 language/snippets/sentiment/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/language/snippets/sentiment/README.md b/language/snippets/sentiment/README.md
index 86c75a83f7c8..955629931ee0 100644
--- a/language/snippets/sentiment/README.md
+++ b/language/snippets/sentiment/README.md
@@ -22,7 +22,7 @@ Set up your
 
 ## Download the Code
 
 ```
-$ git clone https://github.com/GoogleCloudPlatform/python-dev-samples.git
+$ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git
 $ cd python-docs-samples/language/sentiment
 ```

From 974ec26e46aa55c1f0bd6fa7b69060a5baef9ba6 Mon Sep 17 00:00:00 2001
From: Gus Class
Date: Tue, 4 Apr 2017 09:39:06 -0700
Subject: [PATCH 060/323] Updates library version.
 [(#885)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/885)

---
 language/snippets/cloud-client/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/language/snippets/cloud-client/requirements.txt b/language/snippets/cloud-client/requirements.txt
index f1dafd7e696d..9b608a045a77 100644
--- a/language/snippets/cloud-client/requirements.txt
+++ b/language/snippets/cloud-client/requirements.txt
@@ -1 +1 @@
-google-cloud-language==0.23.1
+google-cloud-language==0.24.0

From c58937d29d4cf0cec5f1d34579e05edf28f12513 Mon Sep 17 00:00:00 2001
From: DPE bot
Date: Tue, 4 Apr 2017 09:39:33 -0700
Subject: [PATCH 061/323] Auto-update dependencies.
[(#876)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/876) --- language/snippets/sentiment/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index f1dafd7e696d..9b608a045a77 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.23.1 +google-cloud-language==0.24.0 From e2166892d00d26dfe591e6b29c51cab9adc13103 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 4 Apr 2017 16:08:30 -0700 Subject: [PATCH 062/323] Remove cloud config fixture [(#887)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/887) * Remove cloud config fixture * Fix client secrets * Fix bigtable instance --- .../snippets/cloud-client/snippets_test.py | 28 +++++++++---------- language/snippets/ocr_nl/main_test.py | 15 +++++----- 2 files changed, 20 insertions(+), 23 deletions(-) diff --git a/language/snippets/cloud-client/snippets_test.py b/language/snippets/cloud-client/snippets_test.py index 47050e44e23c..080d5dd5d5bb 100644 --- a/language/snippets/cloud-client/snippets_test.py +++ b/language/snippets/cloud-client/snippets_test.py @@ -12,49 +12,47 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import snippets +BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] +TEST_FILE_URL = 'gs://{}/text.txt'.format(BUCKET) -def test_sentiment_text(cloud_config, capsys): + +def test_sentiment_text(capsys): snippets.sentiment_text('President Obama is speaking at the White House.') out, _ = capsys.readouterr() assert 'Score: 0.2' in out -def test_sentiment_file(cloud_config, capsys): - cloud_storage_input_uri = 'gs://{}/text.txt'.format( - cloud_config.storage_bucket) - snippets.sentiment_file(cloud_storage_input_uri) +def test_sentiment_file(capsys): + snippets.sentiment_file(TEST_FILE_URL) out, _ = capsys.readouterr() assert 'Score: 0.2' in out -def test_entities_text(cloud_config, capsys): +def test_entities_text(capsys): snippets.entities_text('President Obama is speaking at the White House.') out, _ = capsys.readouterr() assert 'name' in out assert ': Obama' in out -def test_entities_file(cloud_config, capsys): - cloud_storage_input_uri = 'gs://{}/text.txt'.format( - cloud_config.storage_bucket) - snippets.entities_file(cloud_storage_input_uri) +def test_entities_file(capsys): + snippets.entities_file(TEST_FILE_URL) out, _ = capsys.readouterr() assert 'name' in out assert ': Obama' in out -def test_syntax_text(cloud_config, capsys): +def test_syntax_text(capsys): snippets.syntax_text('President Obama is speaking at the White House.') out, _ = capsys.readouterr() assert 'NOUN: President' in out -def test_syntax_file(cloud_config, capsys): - cloud_storage_input_uri = 'gs://{}/text.txt'.format( - cloud_config.storage_bucket) - snippets.syntax_file(cloud_storage_input_uri) +def test_syntax_file(capsys): + snippets.syntax_file(TEST_FILE_URL) out, _ = capsys.readouterr() assert 'NOUN: President' in out diff --git a/language/snippets/ocr_nl/main_test.py b/language/snippets/ocr_nl/main_test.py index e5a9962e1e25..832483ca5317 100755 --- a/language/snippets/ocr_nl/main_test.py +++ b/language/snippets/ocr_nl/main_test.py @@ -13,15 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
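# An aside on the environment-driven setup above (a variation, not part of
# this change): reading the bucket with os.environ.get and skipping lets
# the suite degrade gracefully when CLOUD_STORAGE_BUCKET is unset.
import os

import pytest

BUCKET = os.environ.get('CLOUD_STORAGE_BUCKET')
pytestmark = pytest.mark.skipif(
    BUCKET is None, reason='CLOUD_STORAGE_BUCKET is not set')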
-"""Tests for main.""" - +import os import re import zipfile import main - -_TEST_IMAGE_URI = 'gs://{}/language/image8.png' +BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] +TEST_IMAGE_URI = 'gs://{}/language/image8.png'.format(BUCKET) def test_batch_empty(): @@ -36,10 +35,10 @@ def test_batch_single(): assert batched == ((1,),) -def test_single_image_returns_text(cloud_config): +def test_single_image_returns_text(): vision_api_client = main.VisionApi() - image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket) + image_path = TEST_IMAGE_URI texts = vision_api_client.detect_text([image_path]) assert image_path in texts @@ -66,9 +65,9 @@ def test_text_returns_entities(): assert wurl == 'http://en.wikipedia.org/wiki/Sherlock_Holmes' -def test_entities_list(cloud_config): +def test_entities_list(): vision_api_client = main.VisionApi() - image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket) + image_path = TEST_IMAGE_URI texts = vision_api_client.detect_text([image_path]) locale, document = main.extract_description(texts[image_path]) text_analyzer = main.TextAnalyzer() From f52aa10b300d1c19c8796bb55cc8ddeb0518fa59 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 5 Apr 2017 15:21:33 -0700 Subject: [PATCH 063/323] Remove resource [(#890)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/890) * Remove resource fixture * Remove remote resource --- language/snippets/ocr_nl/main_test.py | 12 +++++++++--- .../sentiment/sentiment_analysis_test.py | 19 +++++++++++-------- language/snippets/syntax_triples/main_test.py | 7 +++++-- 3 files changed, 25 insertions(+), 13 deletions(-) diff --git a/language/snippets/ocr_nl/main_test.py b/language/snippets/ocr_nl/main_test.py index 832483ca5317..afaf2e16576c 100755 --- a/language/snippets/ocr_nl/main_test.py +++ b/language/snippets/ocr_nl/main_test.py @@ -17,10 +17,14 @@ import re import zipfile +import requests + import main BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] TEST_IMAGE_URI = 'gs://{}/language/image8.png'.format(BUCKET) +OCR_IMAGES_URI = 'http://storage.googleapis.com/{}/{}'.format( + BUCKET, 'language/ocr_nl-images-small.zip') def test_batch_empty(): @@ -79,14 +83,16 @@ def test_entities_list(): assert wurl == 'http://en.wikipedia.org/wiki/Mr_Bennet' -def test_main(remote_resource, tmpdir, capsys): +def test_main(tmpdir, capsys): images_path = str(tmpdir.mkdir('images')) # First, pull down some test data - zip_path = remote_resource('language/ocr_nl-images-small.zip', tmpdir) + response = requests.get(OCR_IMAGES_URI) + images_file = tmpdir.join('images.zip') + images_file.write_binary(response.content) # Extract it to the image directory - with zipfile.ZipFile(zip_path) as zfile: + with zipfile.ZipFile(str(images_file)) as zfile: zfile.extractall(images_path) main.main(images_path, str(tmpdir.join('ocr_nl.db'))) diff --git a/language/snippets/sentiment/sentiment_analysis_test.py b/language/snippets/sentiment/sentiment_analysis_test.py index 19ec86f17d5b..05d28ab27898 100644 --- a/language/snippets/sentiment/sentiment_analysis_test.py +++ b/language/snippets/sentiment/sentiment_analysis_test.py @@ -11,37 +11,40 @@ # See the License for the specific language governing permissions and # limitations under the License. 
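# A sketch of the fixture-free download pattern main_test.py above moves
# to: fetch the archive with requests and unpack it into pytest's tmpdir.
# The URL argument is a placeholder shaped like OCR_IMAGES_URI.
import zipfile

import requests


def fetch_and_extract(url, tmpdir):
    archive = tmpdir.join('images.zip')
    archive.write_binary(requests.get(url).content)
    images_path = str(tmpdir.mkdir('images'))
    with zipfile.ZipFile(str(archive)) as zfile:
        zfile.extractall(images_path)
    return images_path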
+import os import re from sentiment_analysis import analyze +RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') -def test_pos(resource, capsys): - analyze(resource('pos.txt')) + +def test_pos(capsys): + analyze(os.path.join(RESOURCES, 'pos.txt')) out, err = capsys.readouterr() score = float(re.search('score of (.+?) with', out).group(1)) magnitude = float(re.search('magnitude of (.+?)', out).group(1)) assert score * magnitude > 0 -def test_neg(resource, capsys): - analyze(resource('neg.txt')) +def test_neg(capsys): + analyze(os.path.join(RESOURCES, 'neg.txt')) out, err = capsys.readouterr() score = float(re.search('score of (.+?) with', out).group(1)) magnitude = float(re.search('magnitude of (.+?)', out).group(1)) assert score * magnitude < 0 -def test_mixed(resource, capsys): - analyze(resource('mixed.txt')) +def test_mixed(capsys): + analyze(os.path.join(RESOURCES, 'mixed.txt')) out, err = capsys.readouterr() score = float(re.search('score of (.+?) with', out).group(1)) assert score <= 0.3 assert score >= -0.3 -def test_neutral(resource, capsys): - analyze(resource('neutral.txt')) +def test_neutral(capsys): + analyze(os.path.join(RESOURCES, 'neutral.txt')) out, err = capsys.readouterr() magnitude = float(re.search('magnitude of (.+?)', out).group(1)) assert magnitude <= 2.0 diff --git a/language/snippets/syntax_triples/main_test.py b/language/snippets/syntax_triples/main_test.py index 62c2915da02e..6aa87818e35b 100755 --- a/language/snippets/syntax_triples/main_test.py +++ b/language/snippets/syntax_triples/main_test.py @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import re import main +RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') + def test_dependents(): text = "I am eating a delicious banana" @@ -41,8 +44,8 @@ def test_find_triples(): assert (1, 2, 5) == triple -def test_obama_example(resource, capsys): - main.main(resource('obama_wikipedia.txt')) +def test_obama_example(capsys): + main.main(os.path.join(RESOURCES, 'obama_wikipedia.txt')) stdout, _ = capsys.readouterr() lines = stdout.split('\n') assert re.match( From 1d18ec5f7c7c79c0cd4e3f685a47b649aae6f2a2 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 12 Apr 2017 09:22:36 -0700 Subject: [PATCH 064/323] Auto-update dependencies. 
[(#898)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/898) --- language/snippets/cloud-client/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/language/snippets/cloud-client/requirements.txt b/language/snippets/cloud-client/requirements.txt index 9b608a045a77..4a58920cad19 100644 --- a/language/snippets/cloud-client/requirements.txt +++ b/language/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.24.0 +google-cloud-language==0.24.1 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 9b608a045a77..4a58920cad19 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.24.0 +google-cloud-language==0.24.1 From e1126065090556c45fc1b96e2eb29443ebaac32b Mon Sep 17 00:00:00 2001 From: danaharon Date: Mon, 17 Apr 2017 14:06:48 -0700 Subject: [PATCH 065/323] Update transcribe_async.py to have long GCS flac example [(#904)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/904) --- language/snippets/cloud-client/README.rst | 37 +++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/language/snippets/cloud-client/README.rst b/language/snippets/cloud-client/README.rst index a0259ce9e866..bfa46d473606 100644 --- a/language/snippets/cloud-client/README.rst +++ b/language/snippets/cloud-client/README.rst @@ -82,6 +82,43 @@ To run this sample: $ python quickstart.py +Snippets ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python snippets.py + + usage: snippets.py [-h] + {sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + ... + + This application demonstrates how to perform basic operations with the + Google Cloud Natural Language API + + For more information, the documentation at + https://cloud.google.com/natural-language/docs. + + positional arguments: + {sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + sentiment-text Detects sentiment in the text. + sentiment-file Detects sentiment in the file located in Google Cloud + Storage. + entities-text Detects entities in the text. + entities-file Detects entities in the file located in Google Cloud + Storage. + syntax-text Detects syntax in the text. + syntax-file Detects syntax in the file located in Google Cloud + Storage. + + optional arguments: + -h, --help show this help message and exit + + The client library From a22f00f2e3217d9168d5d1a074e979912afad21d Mon Sep 17 00:00:00 2001 From: Gus Class Date: Wed, 19 Apr 2017 14:58:27 -0700 Subject: [PATCH 066/323] NL v1beta2 [(#908)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/908) * Reorganizes samples, adds new snippet, and demonstrates switching API versions using GAPIC manual layer. * Corrects beta version in link * Copyright dates on new files * Removes README with nav, changes all snippets to use v1beta2 in beta folder * Fixes v1beta2 test on GCS sentiment. 
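The version switch described here reduces to a single constructor argument in
the client library of that era; a minimal sketch, assuming
google-cloud-language 0.24.x and application default credentials:

    from google.cloud import language

    # Request the v1beta2 surface; omitting api_version selects stable v1.
    language_client = language.Client(api_version='v1beta2')
    document = language_client.document_from_text(u'Hallo Welt!', language='DE')
    sentiment = document.analyze_sentiment().sentiment
    print(sentiment.score, sentiment.magnitude)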
--- .../snippets/cloud-client/{ => v1}/README.rst | 0 .../cloud-client/{ => v1}/README.rst.in | 0 .../cloud-client/{ => v1}/quickstart.py | 0 .../cloud-client/{ => v1}/quickstart_test.py | 0 .../cloud-client/{ => v1}/requirements.txt | 0 .../cloud-client/{ => v1}/resources/text.txt | 0 .../cloud-client/{ => v1}/snippets.py | 0 .../cloud-client/{ => v1}/snippets_test.py | 0 .../snippets/cloud-client/v1beta2/README.rst | 144 +++++++++++ .../cloud-client/v1beta2/README.rst.in | 24 ++ .../cloud-client/v1beta2/quickstart.py | 39 +++ .../cloud-client/v1beta2/quickstart_test.py | 22 ++ .../cloud-client/v1beta2/requirements.txt | 2 + .../cloud-client/v1beta2/resources/text.txt | 1 + .../snippets/cloud-client/v1beta2/snippets.py | 236 ++++++++++++++++++ .../cloud-client/v1beta2/snippets_test.py | 71 ++++++ 16 files changed, 539 insertions(+) rename language/snippets/cloud-client/{ => v1}/README.rst (100%) rename language/snippets/cloud-client/{ => v1}/README.rst.in (100%) rename language/snippets/cloud-client/{ => v1}/quickstart.py (100%) rename language/snippets/cloud-client/{ => v1}/quickstart_test.py (100%) rename language/snippets/cloud-client/{ => v1}/requirements.txt (100%) rename language/snippets/cloud-client/{ => v1}/resources/text.txt (100%) rename language/snippets/cloud-client/{ => v1}/snippets.py (100%) rename language/snippets/cloud-client/{ => v1}/snippets_test.py (100%) create mode 100644 language/snippets/cloud-client/v1beta2/README.rst create mode 100644 language/snippets/cloud-client/v1beta2/README.rst.in create mode 100644 language/snippets/cloud-client/v1beta2/quickstart.py create mode 100644 language/snippets/cloud-client/v1beta2/quickstart_test.py create mode 100644 language/snippets/cloud-client/v1beta2/requirements.txt create mode 100644 language/snippets/cloud-client/v1beta2/resources/text.txt create mode 100644 language/snippets/cloud-client/v1beta2/snippets.py create mode 100644 language/snippets/cloud-client/v1beta2/snippets_test.py diff --git a/language/snippets/cloud-client/README.rst b/language/snippets/cloud-client/v1/README.rst similarity index 100% rename from language/snippets/cloud-client/README.rst rename to language/snippets/cloud-client/v1/README.rst diff --git a/language/snippets/cloud-client/README.rst.in b/language/snippets/cloud-client/v1/README.rst.in similarity index 100% rename from language/snippets/cloud-client/README.rst.in rename to language/snippets/cloud-client/v1/README.rst.in diff --git a/language/snippets/cloud-client/quickstart.py b/language/snippets/cloud-client/v1/quickstart.py similarity index 100% rename from language/snippets/cloud-client/quickstart.py rename to language/snippets/cloud-client/v1/quickstart.py diff --git a/language/snippets/cloud-client/quickstart_test.py b/language/snippets/cloud-client/v1/quickstart_test.py similarity index 100% rename from language/snippets/cloud-client/quickstart_test.py rename to language/snippets/cloud-client/v1/quickstart_test.py diff --git a/language/snippets/cloud-client/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt similarity index 100% rename from language/snippets/cloud-client/requirements.txt rename to language/snippets/cloud-client/v1/requirements.txt diff --git a/language/snippets/cloud-client/resources/text.txt b/language/snippets/cloud-client/v1/resources/text.txt similarity index 100% rename from language/snippets/cloud-client/resources/text.txt rename to language/snippets/cloud-client/v1/resources/text.txt diff --git a/language/snippets/cloud-client/snippets.py 
b/language/snippets/cloud-client/v1/snippets.py similarity index 100% rename from language/snippets/cloud-client/snippets.py rename to language/snippets/cloud-client/v1/snippets.py diff --git a/language/snippets/cloud-client/snippets_test.py b/language/snippets/cloud-client/v1/snippets_test.py similarity index 100% rename from language/snippets/cloud-client/snippets_test.py rename to language/snippets/cloud-client/v1/snippets_test.py diff --git a/language/snippets/cloud-client/v1beta2/README.rst b/language/snippets/cloud-client/v1beta2/README.rst new file mode 100644 index 000000000000..49cdec136842 --- /dev/null +++ b/language/snippets/cloud-client/v1beta2/README.rst @@ -0,0 +1,144 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Natural Language API Python Samples +=============================================================================== + +This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. + + + + +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +Authentication is typically done through `Application Default Credentials`_, +which means you do not have to change the code to authenticate as long as +your environment has credentials. You have a few options for setting up +authentication: + +#. When running locally, use the `Google Cloud SDK`_ + + .. code-block:: bash + + gcloud beta auth application-default login + + +#. When running on App Engine or Compute Engine, credentials are already + set-up. However, you may need to configure your Compute Engine instance + with `additional scopes`_. + +#. You can create a `Service Account key file`_. This file can be used to + authenticate to Google Cloud Platform services from any environment. To use + the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to + the path to the key file, for example: + + .. code-block:: bash + + export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json + +.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow +.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using +.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount + +Install Dependencies +++++++++++++++++++++ + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Quickstart ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python quickstart.py + + +Snippets ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. 
code-block:: bash + + $ python snippets.py + + usage: snippets.py [-h] + {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + ... + + This application demonstrates how to perform basic operations with the + Google Cloud Natural Language API + + For more information, the documentation at + https://cloud.google.com/natural-language/docs. + + positional arguments: + {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + sentiment-entities-text + Detects entity sentiment in the provided text. + sentiment-entities-file + Detects entity sentiment in a Google Cloud Storage + file. + sentiment-text Detects sentiment in the text. + sentiment-file Detects sentiment in the file located in Google Cloud + Storage. + entities-text Detects entities in the text. + entities-file Detects entities in the file located in Google Cloud + Storage. + syntax-text Detects syntax in the text. + syntax-file Detects syntax in the file located in Google Cloud + Storage. + + optional arguments: + -h, --help show this help message and exit + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/language/snippets/cloud-client/v1beta2/README.rst.in b/language/snippets/cloud-client/v1beta2/README.rst.in new file mode 100644 index 000000000000..faf402bfe9c0 --- /dev/null +++ b/language/snippets/cloud-client/v1beta2/README.rst.in @@ -0,0 +1,24 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Natural Language API + short_name: Cloud Natural Language API + url: https://cloud.google.com/natural-language/docs/ + description: > + The `Google Cloud Natural Language API`_ provides natural language + understanding technologies to developers, including sentiment analysis, + entity recognition, and syntax analysis. This API is part of the larger + Cloud Machine Learning API. + +setup: +- auth +- install_deps + +samples: +- name: Quickstart + file: quickstart.py +- name: Snippets + file: snippets.py + show_help: true + +cloud_client_library: true diff --git a/language/snippets/cloud-client/v1beta2/quickstart.py b/language/snippets/cloud-client/v1beta2/quickstart.py new file mode 100644 index 000000000000..c5a4b9c3ebc0 --- /dev/null +++ b/language/snippets/cloud-client/v1beta2/quickstart.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python + +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +def run_quickstart(): + # [START language_quickstart] + # Imports the Google Cloud client library + from google.cloud import language + + # Instantiates a client with they v1beta2 version + language_client = language.Client(api_version='v1beta2') + + # The text to analyze + text = 'Hallo Welt!' + document = language_client.document_from_text(text, language='DE') + + # Detects the sentiment of the text + sentiment = document.analyze_sentiment().sentiment + + print('Text: {}'.format(text)) + print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) + # [END language_quickstart] + + +if __name__ == '__main__': + run_quickstart() diff --git a/language/snippets/cloud-client/v1beta2/quickstart_test.py b/language/snippets/cloud-client/v1beta2/quickstart_test.py new file mode 100644 index 000000000000..839faae2a00d --- /dev/null +++ b/language/snippets/cloud-client/v1beta2/quickstart_test.py @@ -0,0 +1,22 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import quickstart + + +def test_quickstart(capsys): + quickstart.run_quickstart() + out, _ = capsys.readouterr() + assert 'Sentiment' in out diff --git a/language/snippets/cloud-client/v1beta2/requirements.txt b/language/snippets/cloud-client/v1beta2/requirements.txt new file mode 100644 index 000000000000..3b8a6a4c1daa --- /dev/null +++ b/language/snippets/cloud-client/v1beta2/requirements.txt @@ -0,0 +1,2 @@ +gapic-google-cloud-language-v1beta2==0.15.3 +google-cloud-language==0.24.1 diff --git a/language/snippets/cloud-client/v1beta2/resources/text.txt b/language/snippets/cloud-client/v1beta2/resources/text.txt new file mode 100644 index 000000000000..97a1cea02b7a --- /dev/null +++ b/language/snippets/cloud-client/v1beta2/resources/text.txt @@ -0,0 +1 @@ +President Obama is speaking at the White House. \ No newline at end of file diff --git a/language/snippets/cloud-client/v1beta2/snippets.py b/language/snippets/cloud-client/v1beta2/snippets.py new file mode 100644 index 000000000000..af4721189562 --- /dev/null +++ b/language/snippets/cloud-client/v1beta2/snippets.py @@ -0,0 +1,236 @@ +#!/usr/bin/env python + +# Copyright 2017 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
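# Usage sketch for the quickstart above with a Cloud Storage source instead
# of inline text; the bucket URI is illustrative and the v1beta2 surface is
# assumed, as elsewhere in this patch.
from google.cloud import language

language_client = language.Client(api_version='v1beta2')
document = language_client.document_from_url('gs://your-bucket/text.txt')
sentiment = document.analyze_sentiment().sentiment
print(u'Score: {}, Magnitude: {}'.format(sentiment.score, sentiment.magnitude))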
+ +"""This application demonstrates how to perform basic operations with the +Google Cloud Natural Language API + +For more information, the documentation at +https://cloud.google.com/natural-language/docs. +""" + +import argparse + +from google.cloud import language +from google.cloud.gapic.language.v1beta2 import enums +from google.cloud.gapic.language.v1beta2 import language_service_client +from google.cloud.proto.language.v1beta2 import language_service_pb2 + + +def sentiment_text(text): + """Detects sentiment in the text.""" + language_client = language.Client(api_version='v1beta2') + + # Instantiates a plain text document. + document = language_client.document_from_text(text) + + # Detects sentiment in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + sentiment = document.analyze_sentiment().sentiment + + print('Score: {}'.format(sentiment.score)) + print('Magnitude: {}'.format(sentiment.magnitude)) + + +def sentiment_file(gcs_uri): + """Detects sentiment in the file located in Google Cloud Storage.""" + language_client = language.Client(api_version='v1beta2') + + # Instantiates a plain text document. + document = language_client.document_from_url(gcs_uri) + + # Detects sentiment in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + sentiment = document.analyze_sentiment().sentiment + + print('Score: {}'.format(sentiment.score)) + print('Magnitude: {}'.format(sentiment.magnitude)) + + +def entities_text(text): + """Detects entities in the text.""" + language_client = language.Client(api_version='v1beta2') + + # Instantiates a plain text document. + document = language_client.document_from_text(text) + + # Detects entities in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + entities = document.analyze_entities().entities + + for entity in entities: + print('=' * 20) + print('{:<16}: {}'.format('name', entity.name)) + print('{:<16}: {}'.format('type', entity.entity_type)) + print('{:<16}: {}'.format('metadata', entity.metadata)) + print('{:<16}: {}'.format('salience', entity.salience)) + print('{:<16}: {}'.format('wikipedia_url', + entity.metadata.get('wikipedia_url', '-'))) + + +def entities_file(gcs_uri): + """Detects entities in the file located in Google Cloud Storage.""" + language_client = language.Client(api_version='v1beta2') + + # Instantiates a plain text document. + document = language_client.document_from_url(gcs_uri) + + # Detects sentiment in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + entities = document.analyze_entities().entities + + for entity in entities: + print('=' * 20) + print('{:<16}: {}'.format('name', entity.name)) + print('{:<16}: {}'.format('type', entity.entity_type)) + print('{:<16}: {}'.format('metadata', entity.metadata)) + print('{:<16}: {}'.format('salience', entity.salience)) + print('{:<16}: {}'.format('wikipedia_url', + entity.metadata.get('wikipedia_url', '-'))) + + +def syntax_text(text): + """Detects syntax in the text.""" + language_client = language.Client(api_version='v1beta2') + + # Instantiates a plain text document. + document = language_client.document_from_text(text) + + # Detects syntax in the document. 
You can also analyze HTML with: + # document.doc_type == language.Document.HTML + tokens = document.analyze_syntax().tokens + + for token in tokens: + print('{}: {}'.format(token.part_of_speech, token.text_content)) + + +def syntax_file(gcs_uri): + """Detects syntax in the file located in Google Cloud Storage.""" + language_client = language.Client(api_version='v1beta2') + + # Instantiates a plain text document. + document = language_client.document_from_url(gcs_uri) + + # Detects syntax in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + tokens = document.analyze_syntax().tokens + + for token in tokens: + print('{}: {}'.format(token.part_of_speech, token.text_content)) + + +def entity_sentiment_text(text): + """Detects entity sentiment in the provided text.""" + language_client = language_service_client.LanguageServiceClient() + document = language_service_pb2.Document() + + document.content = text.encode('utf-8') + document.type = enums.Document.Type.PLAIN_TEXT + + result = language_client.analyze_entity_sentiment( + document, enums.EncodingType.UTF8) + + for entity in result.entities: + print('Mentions: ') + print('Name: "{}"'.format(entity.name)) + for mention in entity.mentions: + print(' Begin Offset : {}'.format(mention.text.begin_offset)) + print(' Content : {}'.format(mention.text.content)) + print(' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(' Sentiment : {}'.format(mention.sentiment.score)) + print(' Type : {}'.format(mention.type)) + print('Salience: {}'.format(entity.salience)) + print('Sentiment: {}\n'.format(entity.sentiment)) + + +def entity_sentiment_file(gcs_uri): + """Detects entity sentiment in a Google Cloud Storage file.""" + language_client = language_service_client.LanguageServiceClient() + document = language_service_pb2.Document() + + document.gcs_content_uri = gcs_uri + document.type = enums.Document.Type.PLAIN_TEXT + + result = language_client.analyze_entity_sentiment( + document, enums.EncodingType.UTF8) + + for entity in result.entities: + print('Name: "{}"'.format(entity.name)) + for mention in entity.mentions: + print(' Begin Offset : {}'.format(mention.text.begin_offset)) + print(' Content : {}'.format(mention.text.content)) + print(' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(' Sentiment : {}'.format(mention.sentiment.score)) + print(' Type : {}'.format(mention.type)) + print('Salience: {}'.format(entity.salience)) + print('Sentiment: {}\n'.format(entity.sentiment)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + + sentiment_entities_text_parser = subparsers.add_parser( + 'sentiment-entities-text', help=entity_sentiment_text.__doc__) + sentiment_entities_text_parser.add_argument('text') + + sentiment_entities_file_parser = subparsers.add_parser( + 'sentiment-entities-file', help=entity_sentiment_file.__doc__) + sentiment_entities_file_parser.add_argument('gcs_uri') + + sentiment_text_parser = subparsers.add_parser( + 'sentiment-text', help=sentiment_text.__doc__) + sentiment_text_parser.add_argument('text') + + sentiment_file_parser = subparsers.add_parser( + 'sentiment-file', help=sentiment_file.__doc__) + sentiment_file_parser.add_argument('gcs_uri') + + entities_text_parser = subparsers.add_parser( + 'entities-text', help=entities_text.__doc__) + entities_text_parser.add_argument('text') + + entities_file_parser = 
subparsers.add_parser( + 'entities-file', help=entities_file.__doc__) + entities_file_parser.add_argument('gcs_uri') + + syntax_text_parser = subparsers.add_parser( + 'syntax-text', help=syntax_text.__doc__) + syntax_text_parser.add_argument('text') + + syntax_file_parser = subparsers.add_parser( + 'syntax-file', help=syntax_file.__doc__) + syntax_file_parser.add_argument('gcs_uri') + + args = parser.parse_args() + + if args.command == 'sentiment-text': + sentiment_text(args.text) + elif args.command == 'sentiment-file': + sentiment_file(args.gcs_uri) + elif args.command == 'entities-text': + entities_text(args.text) + elif args.command == 'entities-file': + entities_file(args.gcs_uri) + elif args.command == 'syntax-text': + syntax_text(args.text) + elif args.command == 'syntax-file': + syntax_file(args.gcs_uri) + elif args.command == 'sentiment-entities-text': + entity_sentiment_text(args.text) + elif args.command == 'sentiment-entities-file': + entity_sentiment_file(args.gcs_uri) diff --git a/language/snippets/cloud-client/v1beta2/snippets_test.py b/language/snippets/cloud-client/v1beta2/snippets_test.py new file mode 100644 index 000000000000..d1e6abd0cf6d --- /dev/null +++ b/language/snippets/cloud-client/v1beta2/snippets_test.py @@ -0,0 +1,71 @@ +# Copyright 2017 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
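# The subcommand handlers above can also be exercised directly from Python,
# which is what the tests that follow do; a sketch assuming this module is
# importable as `snippets` and application default credentials are set.
import snippets

snippets.sentiment_text(u'President Obama is speaking at the White House.')
snippets.entity_sentiment_text(
    u'President Obama is speaking at the White House.')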
+ +import os + +import snippets + +BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] +TEST_FILE_URL = 'gs://{}/text.txt'.format(BUCKET) + + +def test_sentiment_text(capsys): + snippets.sentiment_text('President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'Score: 0' in out + + +def test_sentiment_file(capsys): + snippets.sentiment_file(TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'Score: 0' in out + + +def test_entities_text(capsys): + snippets.entities_text('President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'name' in out + assert ': Obama' in out + + +def test_entities_file(capsys): + snippets.entities_file(TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'name' in out + assert ': Obama' in out + + +def test_syntax_text(capsys): + snippets.syntax_text('President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'NOUN: President' in out + + +def test_syntax_file(capsys): + snippets.syntax_file(TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'NOUN: President' in out + + +def test_sentiment_entities_text(capsys): + snippets.entity_sentiment_text( + 'President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'Content : White House' in out + + +def test_sentiment_entities_file(capsys): + snippets.entity_sentiment_file(TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'Content : White House' in out From b431a7ce992ed4532f2e37d881dd8edfe5487488 Mon Sep 17 00:00:00 2001 From: Gus Class Date: Fri, 21 Apr 2017 09:49:33 -0700 Subject: [PATCH 067/323] Fixes for text encoding [(#913)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/913) * Fixes for non-ASCII encodings * Adds test for UTF * Style fix --- language/snippets/cloud-client/v1/snippets.py | 34 +++++---- .../snippets/cloud-client/v1beta2/snippets.py | 69 +++++++++++-------- .../cloud-client/v1beta2/snippets_test.py | 10 +++ 3 files changed, 73 insertions(+), 40 deletions(-) diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py index 94d1db4a23a7..31e02ef65059 100644 --- a/language/snippets/cloud-client/v1/snippets.py +++ b/language/snippets/cloud-client/v1/snippets.py @@ -24,12 +24,16 @@ import argparse from google.cloud import language +import six def sentiment_text(text): """Detects sentiment in the text.""" language_client = language.Client() + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + # Instantiates a plain text document. document = language_client.document_from_text(text) @@ -60,6 +64,9 @@ def entities_text(text): """Detects entities in the text.""" language_client = language.Client() + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + # Instantiates a plain text document. 
document = language_client.document_from_text(text) @@ -69,11 +76,11 @@ def entities_text(text): for entity in entities: print('=' * 20) - print('{:<16}: {}'.format('name', entity.name)) - print('{:<16}: {}'.format('type', entity.entity_type)) - print('{:<16}: {}'.format('metadata', entity.metadata)) - print('{:<16}: {}'.format('salience', entity.salience)) - print('{:<16}: {}'.format('wikipedia_url', + print(u'{:<16}: {}'.format('name', entity.name)) + print(u'{:<16}: {}'.format('type', entity.entity_type)) + print(u'{:<16}: {}'.format('metadata', entity.metadata)) + print(u'{:<16}: {}'.format('salience', entity.salience)) + print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) @@ -90,11 +97,11 @@ def entities_file(gcs_uri): for entity in entities: print('=' * 20) - print('{:<16}: {}'.format('name', entity.name)) - print('{:<16}: {}'.format('type', entity.entity_type)) - print('{:<16}: {}'.format('metadata', entity.metadata)) - print('{:<16}: {}'.format('salience', entity.salience)) - print('{:<16}: {}'.format('wikipedia_url', + print(u'{:<16}: {}'.format('name', entity.name)) + print(u'{:<16}: {}'.format('type', entity.entity_type)) + print(u'{:<16}: {}'.format('metadata', entity.metadata)) + print(u'{:<16}: {}'.format('salience', entity.salience)) + print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) @@ -102,6 +109,9 @@ def syntax_text(text): """Detects syntax in the text.""" language_client = language.Client() + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + # Instantiates a plain text document. document = language_client.document_from_text(text) @@ -110,7 +120,7 @@ def syntax_text(text): tokens = document.analyze_syntax().tokens for token in tokens: - print('{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech, token.text_content)) def syntax_file(gcs_uri): @@ -125,7 +135,7 @@ def syntax_file(gcs_uri): tokens = document.analyze_syntax().tokens for token in tokens: - print('{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech, token.text_content)) if __name__ == '__main__': diff --git a/language/snippets/cloud-client/v1beta2/snippets.py b/language/snippets/cloud-client/v1beta2/snippets.py index af4721189562..2e6745d2c94e 100644 --- a/language/snippets/cloud-client/v1beta2/snippets.py +++ b/language/snippets/cloud-client/v1beta2/snippets.py @@ -27,12 +27,16 @@ from google.cloud.gapic.language.v1beta2 import enums from google.cloud.gapic.language.v1beta2 import language_service_client from google.cloud.proto.language.v1beta2 import language_service_pb2 +import six def sentiment_text(text): """Detects sentiment in the text.""" language_client = language.Client(api_version='v1beta2') + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + # Instantiates a plain text document. 
document = language_client.document_from_text(text) @@ -40,8 +44,8 @@ def sentiment_text(text): # document.doc_type == language.Document.HTML sentiment = document.analyze_sentiment().sentiment - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) + print(u'Score: {}'.format(sentiment.score)) + print(u'Magnitude: {}'.format(sentiment.magnitude)) def sentiment_file(gcs_uri): @@ -55,14 +59,17 @@ def sentiment_file(gcs_uri): # document.doc_type == language.Document.HTML sentiment = document.analyze_sentiment().sentiment - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) + print(u'Score: {}'.format(sentiment.score)) + print(u'Magnitude: {}'.format(sentiment.magnitude)) def entities_text(text): """Detects entities in the text.""" language_client = language.Client(api_version='v1beta2') + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + # Instantiates a plain text document. document = language_client.document_from_text(text) @@ -71,12 +78,12 @@ def entities_text(text): entities = document.analyze_entities().entities for entity in entities: - print('=' * 20) - print('{:<16}: {}'.format('name', entity.name)) - print('{:<16}: {}'.format('type', entity.entity_type)) - print('{:<16}: {}'.format('metadata', entity.metadata)) - print('{:<16}: {}'.format('salience', entity.salience)) - print('{:<16}: {}'.format('wikipedia_url', + print(u'=' * 20) + print(u'{:<16}: {}'.format('name', entity.name)) + print(u'{:<16}: {}'.format('type', entity.entity_type)) + print(u'{:<16}: {}'.format('metadata', entity.metadata)) + print(u'{:<16}: {}'.format('salience', entity.salience)) + print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) @@ -105,6 +112,9 @@ def syntax_text(text): """Detects syntax in the text.""" language_client = language.Client(api_version='v1beta2') + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + # Instantiates a plain text document. 
document = language_client.document_from_text(text) @@ -113,7 +123,7 @@ def syntax_text(text): tokens = document.analyze_syntax().tokens for token in tokens: - print('{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech, token.text_content)) def syntax_file(gcs_uri): @@ -128,7 +138,7 @@ def syntax_file(gcs_uri): tokens = document.analyze_syntax().tokens for token in tokens: - print('{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech, token.text_content)) def entity_sentiment_text(text): @@ -136,6 +146,9 @@ def entity_sentiment_text(text): language_client = language_service_client.LanguageServiceClient() document = language_service_pb2.Document() + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + document.content = text.encode('utf-8') document.type = enums.Document.Type.PLAIN_TEXT @@ -144,15 +157,15 @@ def entity_sentiment_text(text): for entity in result.entities: print('Mentions: ') - print('Name: "{}"'.format(entity.name)) + print(u'Name: "{}"'.format(entity.name)) for mention in entity.mentions: - print(' Begin Offset : {}'.format(mention.text.begin_offset)) - print(' Content : {}'.format(mention.text.content)) - print(' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(' Sentiment : {}'.format(mention.sentiment.score)) - print(' Type : {}'.format(mention.type)) - print('Salience: {}'.format(entity.salience)) - print('Sentiment: {}\n'.format(entity.sentiment)) + print(u' Begin Offset : {}'.format(mention.text.begin_offset)) + print(u' Content : {}'.format(mention.text.content)) + print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(u' Sentiment : {}'.format(mention.sentiment.score)) + print(u' Type : {}'.format(mention.type)) + print(u'Salience: {}'.format(entity.salience)) + print(u'Sentiment: {}\n'.format(entity.sentiment)) def entity_sentiment_file(gcs_uri): @@ -167,15 +180,15 @@ def entity_sentiment_file(gcs_uri): document, enums.EncodingType.UTF8) for entity in result.entities: - print('Name: "{}"'.format(entity.name)) + print(u'Name: "{}"'.format(entity.name)) for mention in entity.mentions: - print(' Begin Offset : {}'.format(mention.text.begin_offset)) - print(' Content : {}'.format(mention.text.content)) - print(' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(' Sentiment : {}'.format(mention.sentiment.score)) - print(' Type : {}'.format(mention.type)) - print('Salience: {}'.format(entity.salience)) - print('Sentiment: {}\n'.format(entity.sentiment)) + print(u' Begin Offset : {}'.format(mention.text.begin_offset)) + print(u' Content : {}'.format(mention.text.content)) + print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(u' Sentiment : {}'.format(mention.sentiment.score)) + print(u' Type : {}'.format(mention.type)) + print(u'Salience: {}'.format(entity.salience)) + print(u'Sentiment: {}\n'.format(entity.sentiment)) if __name__ == '__main__': diff --git a/language/snippets/cloud-client/v1beta2/snippets_test.py b/language/snippets/cloud-client/v1beta2/snippets_test.py index d1e6abd0cf6d..8db7aa1dbdf4 100644 --- a/language/snippets/cloud-client/v1beta2/snippets_test.py +++ b/language/snippets/cloud-client/v1beta2/snippets_test.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright 2017 Google, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,6 +27,15 @@ def test_sentiment_text(capsys): assert 'Score: 0' in out +def test_sentiment_utf(capsys): + snippets.sentiment_text( + u'1er site d\'information. Les articles du journal et toute l\'' + + u'actualité en continu : International, France, Société, Economie, ' + + u'Culture, Environnement') + out, _ = capsys.readouterr() + assert 'Score: 0' in out + + def test_sentiment_file(capsys): snippets.sentiment_file(TEST_FILE_URL) out, _ = capsys.readouterr() From b1da393935f4826772e2ffee920dd61976f774ba Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Thu, 27 Apr 2017 09:54:41 -0700 Subject: [PATCH 068/323] Re-generate all readmes --- language/snippets/api/README.rst | 2 +- language/snippets/cloud-client/v1/README.rst | 2 +- .../snippets/cloud-client/v1beta2/README.rst | 2 +- language/snippets/tutorial/README.rst | 97 +++++++++++++++++++ 4 files changed, 100 insertions(+), 3 deletions(-) create mode 100644 language/snippets/tutorial/README.rst diff --git a/language/snippets/api/README.rst b/language/snippets/api/README.rst index 301fed0e07d4..f757fea84396 100644 --- a/language/snippets/api/README.rst +++ b/language/snippets/api/README.rst @@ -26,7 +26,7 @@ authentication: .. code-block:: bash - gcloud beta auth application-default login + gcloud auth application-default login #. When running on App Engine or Compute Engine, credentials are already diff --git a/language/snippets/cloud-client/v1/README.rst b/language/snippets/cloud-client/v1/README.rst index bfa46d473606..4082c6db2e0b 100644 --- a/language/snippets/cloud-client/v1/README.rst +++ b/language/snippets/cloud-client/v1/README.rst @@ -26,7 +26,7 @@ authentication: .. code-block:: bash - gcloud beta auth application-default login + gcloud auth application-default login #. When running on App Engine or Compute Engine, credentials are already diff --git a/language/snippets/cloud-client/v1beta2/README.rst b/language/snippets/cloud-client/v1beta2/README.rst index 49cdec136842..17b5a04bc029 100644 --- a/language/snippets/cloud-client/v1beta2/README.rst +++ b/language/snippets/cloud-client/v1beta2/README.rst @@ -26,7 +26,7 @@ authentication: .. code-block:: bash - gcloud beta auth application-default login + gcloud auth application-default login #. When running on App Engine or Compute Engine, credentials are already diff --git a/language/snippets/tutorial/README.rst b/language/snippets/tutorial/README.rst new file mode 100644 index 000000000000..5b862ead5c6a --- /dev/null +++ b/language/snippets/tutorial/README.rst @@ -0,0 +1,97 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Natural Language Tutorial Python Samples +=============================================================================== + +This directory contains samples for Google Cloud Natural Language Tutorial. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. + + + + +.. 
_Google Cloud Natural Language Tutorial: https://cloud.google.com/natural-language/docs/ + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +Authentication is typically done through `Application Default Credentials`_, +which means you do not have to change the code to authenticate as long as +your environment has credentials. You have a few options for setting up +authentication: + +#. When running locally, use the `Google Cloud SDK`_ + + .. code-block:: bash + + gcloud auth application-default login + + +#. When running on App Engine or Compute Engine, credentials are already + set-up. However, you may need to configure your Compute Engine instance + with `additional scopes`_. + +#. You can create a `Service Account key file`_. This file can be used to + authenticate to Google Cloud Platform services from any environment. To use + the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to + the path to the key file, for example: + + .. code-block:: bash + + export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json + +.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow +.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using +.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount + +Install Dependencies +++++++++++++++++++++ + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Language tutorial ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python tutorial.py + + usage: tutorial.py [-h] movie_review_filename + + positional arguments: + movie_review_filename + The filename of the movie review you'd like to + analyze. + + optional arguments: + -h, --help show this help message and exit + + + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file From d780d08c662dc81c7717f28685b03bddbf5b67d4 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 10 May 2017 09:51:17 -0700 Subject: [PATCH 069/323] Auto-update dependencies. [(#939)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/939) --- language/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index fe93963b90d1..f1acf559c19c 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.2 -requests==2.13.0 +requests==2.14.1 From 885c56ed4edf71743d06db24004510a5ce513cb6 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 11 May 2017 09:33:48 -0700 Subject: [PATCH 070/323] Auto-update dependencies. 
[(#941)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/941) --- language/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index f1acf559c19c..c1a06ec49080 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.2 -requests==2.14.1 +requests==2.14.2 From b861e3262ab8be0df1e9efc5a7e87a2faccd6fd4 Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Tue, 23 May 2017 17:01:25 -0700 Subject: [PATCH 071/323] Fix README rst links [(#962)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/962) * Fix README rst links * Update all READMEs --- language/snippets/cloud-client/v1/README.rst | 6 +++--- language/snippets/cloud-client/v1beta2/README.rst | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/language/snippets/cloud-client/v1/README.rst b/language/snippets/cloud-client/v1/README.rst index 4082c6db2e0b..7e66faa8b8d8 100644 --- a/language/snippets/cloud-client/v1/README.rst +++ b/language/snippets/cloud-client/v1/README.rst @@ -128,11 +128,11 @@ This sample uses the `Google Cloud Client Library for Python`_. You can read the documentation for more details on API usage and use GitHub to `browse the source`_ and `report issues`_. -.. Google Cloud Client Library for Python: +.. _Google Cloud Client Library for Python: https://googlecloudplatform.github.io/google-cloud-python/ -.. browse the source: +.. _browse the source: https://github.com/GoogleCloudPlatform/google-cloud-python -.. report issues: +.. _report issues: https://github.com/GoogleCloudPlatform/google-cloud-python/issues diff --git a/language/snippets/cloud-client/v1beta2/README.rst b/language/snippets/cloud-client/v1beta2/README.rst index 17b5a04bc029..3d260b9df902 100644 --- a/language/snippets/cloud-client/v1beta2/README.rst +++ b/language/snippets/cloud-client/v1beta2/README.rst @@ -133,11 +133,11 @@ This sample uses the `Google Cloud Client Library for Python`_. You can read the documentation for more details on API usage and use GitHub to `browse the source`_ and `report issues`_. -.. Google Cloud Client Library for Python: +.. _Google Cloud Client Library for Python: https://googlecloudplatform.github.io/google-cloud-python/ -.. browse the source: +.. _browse the source: https://github.com/GoogleCloudPlatform/google-cloud-python -.. report issues: +.. _report issues: https://github.com/GoogleCloudPlatform/google-cloud-python/issues From 0f884aa713979c2cb89c66a6df670a10ee1ade5f Mon Sep 17 00:00:00 2001 From: Gus Class Date: Wed, 24 May 2017 09:20:24 -0700 Subject: [PATCH 072/323] Adds test for encoded characters. 
[(#961)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/961) --- .../snippets/cloud-client/v1beta2/snippets.py | 19 ++++++++++++++----- .../cloud-client/v1beta2/snippets_test.py | 7 +++++++ 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/language/snippets/cloud-client/v1beta2/snippets.py b/language/snippets/cloud-client/v1beta2/snippets.py index 2e6745d2c94e..c9f6f32ad5a4 100644 --- a/language/snippets/cloud-client/v1beta2/snippets.py +++ b/language/snippets/cloud-client/v1beta2/snippets.py @@ -22,6 +22,7 @@ """ import argparse +import sys from google.cloud import language from google.cloud.gapic.language.v1beta2 import enums @@ -53,7 +54,7 @@ def sentiment_file(gcs_uri): language_client = language.Client(api_version='v1beta2') # Instantiates a plain text document. - document = language_client.document_from_url(gcs_uri) + document = language_client.document_from_gcs_url(gcs_uri) # Detects sentiment in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML @@ -92,7 +93,7 @@ def entities_file(gcs_uri): language_client = language.Client(api_version='v1beta2') # Instantiates a plain text document. - document = language_client.document_from_url(gcs_uri) + document = language_client.document_from_gcs_url(gcs_uri) # Detects sentiment in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML @@ -131,7 +132,7 @@ def syntax_file(gcs_uri): language_client = language.Client(api_version='v1beta2') # Instantiates a plain text document. - document = language_client.document_from_url(gcs_uri) + document = language_client.document_from_gcs_url(gcs_uri) # Detects syntax in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML @@ -152,8 +153,12 @@ def entity_sentiment_text(text): document.content = text.encode('utf-8') document.type = enums.Document.Type.PLAIN_TEXT + encoding = enums.EncodingType.UTF32 + if sys.maxunicode == 65535: + encoding = enums.EncodingType.UTF16 + result = language_client.analyze_entity_sentiment( - document, enums.EncodingType.UTF8) + document, encoding) for entity in result.entities: print('Mentions: ') @@ -176,8 +181,12 @@ def entity_sentiment_file(gcs_uri): document.gcs_content_uri = gcs_uri document.type = enums.Document.Type.PLAIN_TEXT + encoding = enums.EncodingType.UTF32 + if sys.maxunicode == 65535: + encoding = enums.EncodingType.UTF16 + result = language_client.analyze_entity_sentiment( - document, enums.EncodingType.UTF8) + document, encoding) for entity in result.entities: print(u'Name: "{}"'.format(entity.name)) diff --git a/language/snippets/cloud-client/v1beta2/snippets_test.py b/language/snippets/cloud-client/v1beta2/snippets_test.py index 8db7aa1dbdf4..e6db221780b0 100644 --- a/language/snippets/cloud-client/v1beta2/snippets_test.py +++ b/language/snippets/cloud-client/v1beta2/snippets_test.py @@ -79,3 +79,10 @@ def test_sentiment_entities_file(capsys): snippets.entity_sentiment_file(TEST_FILE_URL) out, _ = capsys.readouterr() assert 'Content : White House' in out + + +def test_sentiment_entities_utf(capsys): + snippets.entity_sentiment_text( + 'foo→bar') + out, _ = capsys.readouterr() + assert 'Begin Offset : 4' in out From df6e1f8cd81c1ee09045a4d1b3ac8b48c29e7f35 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Sat, 17 Jun 2017 09:03:42 -0700 Subject: [PATCH 073/323] Auto-update dependencies. 
[(#992)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/992) --- language/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index c1a06ec49080..622448586c9f 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.2 -requests==2.14.2 +requests==2.18.1 From 9fab888b61bd28d56f35197cd8f1c41c19833b84 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 27 Jun 2017 12:41:15 -0700 Subject: [PATCH 074/323] Auto-update dependencies. [(#1004)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1004) * Auto-update dependencies. * Fix natural language samples * Fix pubsub iam samples * Fix language samples * Fix bigquery samples --- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/cloud-client/v1/snippets.py | 4 ++-- language/snippets/cloud-client/v1beta2/requirements.txt | 2 +- language/snippets/cloud-client/v1beta2/snippets.py | 4 ++-- language/snippets/sentiment/requirements.txt | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 4a58920cad19..1d3c69aeae7e 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.24.1 +google-cloud-language==0.25.0 diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py index 31e02ef65059..c3205a46e028 100644 --- a/language/snippets/cloud-client/v1/snippets.py +++ b/language/snippets/cloud-client/v1/snippets.py @@ -120,7 +120,7 @@ def syntax_text(text): tokens = document.analyze_syntax().tokens for token in tokens: - print(u'{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) def syntax_file(gcs_uri): @@ -135,7 +135,7 @@ def syntax_file(gcs_uri): tokens = document.analyze_syntax().tokens for token in tokens: - print(u'{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) if __name__ == '__main__': diff --git a/language/snippets/cloud-client/v1beta2/requirements.txt b/language/snippets/cloud-client/v1beta2/requirements.txt index 3b8a6a4c1daa..d44360febc26 100644 --- a/language/snippets/cloud-client/v1beta2/requirements.txt +++ b/language/snippets/cloud-client/v1beta2/requirements.txt @@ -1,2 +1,2 @@ gapic-google-cloud-language-v1beta2==0.15.3 -google-cloud-language==0.24.1 +google-cloud-language==0.25.0 diff --git a/language/snippets/cloud-client/v1beta2/snippets.py b/language/snippets/cloud-client/v1beta2/snippets.py index c9f6f32ad5a4..02d0d8e5b314 100644 --- a/language/snippets/cloud-client/v1beta2/snippets.py +++ b/language/snippets/cloud-client/v1beta2/snippets.py @@ -124,7 +124,7 @@ def syntax_text(text): tokens = document.analyze_syntax().tokens for token in tokens: - print(u'{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) def syntax_file(gcs_uri): @@ -139,7 +139,7 @@ def syntax_file(gcs_uri): tokens = document.analyze_syntax().tokens for token in tokens: - print(u'{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: 
{}'.format(token.part_of_speech.tag, token.text_content)) def entity_sentiment_text(text): diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 4a58920cad19..1d3c69aeae7e 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.24.1 +google-cloud-language==0.25.0 From 945ad11f898146220e426840122510a806a32f4e Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 26 Jul 2017 09:03:23 -0700 Subject: [PATCH 075/323] Auto-update dependencies. [(#1031)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1031) --- language/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 622448586c9f..992307a00727 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.2 -requests==2.18.1 +requests==2.18.2 From 3399655ceb5623d9578308b2ab162b165f260d7d Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Fri, 28 Jul 2017 15:00:34 -0700 Subject: [PATCH 076/323] Natural Language GAPIC client library [(#1018)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1018) --- language/snippets/cloud-client/.DS_Store | Bin 0 -> 6148 bytes language/snippets/cloud-client/v1/README.rst | 4 + .../snippets/cloud-client/v1/README.rst.in | 6 + .../snippets/cloud-client/v1/quickstart.py | 16 ++- .../snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/cloud-client/v1/snippets.py | 102 ++++++++++---- .../snippets/cloud-client/v1beta2/README.rst | 4 + .../cloud-client/v1beta2/README.rst.in | 6 + .../cloud-client/v1beta2/quickstart.py | 22 ++- .../cloud-client/v1beta2/requirements.txt | 3 +- .../snippets/cloud-client/v1beta2/snippets.py | 129 +++++++++++------- language/snippets/sentiment/requirements.txt | 2 +- .../snippets/sentiment/sentiment_analysis.py | 30 ++-- 13 files changed, 216 insertions(+), 110 deletions(-) create mode 100644 language/snippets/cloud-client/.DS_Store diff --git a/language/snippets/cloud-client/.DS_Store b/language/snippets/cloud-client/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..f344c851a0ee4f90f50741edcbb6236ebbbc354d GIT binary patch literal 6148 zcmeHK!A`{pJ@TK5l+$r=92a0ahvsOrXzLD-AJ zJA9_tJXH)nbRY%~4!+FJvKg5HW`G%3RR+wdX>F`(fm|0ezzqDF0XiQfDxqUA)u@gR z98?Q{m_xS`w5gY%9BI%om}q`|5!qLbhGr$adW`KG>lp@{#r$6`qDu@SWfEid#21KsjsJmF3xm%a2q`Ow4wopkZ oF4Z_sK|@`|7)w|2E~*mrOEM50gQ-UJpzx1?qJaly;7=L&02eA$o&W#< literal 0 HcmV?d00001 diff --git a/language/snippets/cloud-client/v1/README.rst b/language/snippets/cloud-client/v1/README.rst index 7e66faa8b8d8..a3ee4b7f641f 100644 --- a/language/snippets/cloud-client/v1/README.rst +++ b/language/snippets/cloud-client/v1/README.rst @@ -5,6 +5,10 @@ Google Cloud Natural Language API Python Samples This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. +- See the `migration guide`_ for information about migrating to Python client library v0.26.1. + +.. 
_migration guide: https://cloud.google.com/natural-language/docs/python-client-migration + diff --git a/language/snippets/cloud-client/v1/README.rst.in b/language/snippets/cloud-client/v1/README.rst.in index faf402bfe9c0..1b4855fb4f38 100644 --- a/language/snippets/cloud-client/v1/README.rst.in +++ b/language/snippets/cloud-client/v1/README.rst.in @@ -10,6 +10,12 @@ product: entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. + + - See the `migration guide`_ for information about migrating to Python client library v0.26.1. + + + .. _migration guide: https://cloud.google.com/natural-language/docs/python-client-migration + setup: - auth - install_deps diff --git a/language/snippets/cloud-client/v1/quickstart.py b/language/snippets/cloud-client/v1/quickstart.py index 3fd703a567ee..3c19e395a427 100644 --- a/language/snippets/cloud-client/v1/quickstart.py +++ b/language/snippets/cloud-client/v1/quickstart.py @@ -18,17 +18,25 @@ def run_quickstart(): # [START language_quickstart] # Imports the Google Cloud client library + # [START migration_import] from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + # [END migration_import] # Instantiates a client - language_client = language.Client() + # [START migration_client] + client = language.LanguageServiceClient() + # [END migration_client] # The text to analyze - text = 'Hello, world!' - document = language_client.document_from_text(text) + text = u'Hello, world!' + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) # Detects the sentiment of the text - sentiment = document.analyze_sentiment().sentiment + sentiment = client.analyze_sentiment(document=document).document_sentiment print('Text: {}'.format(text)) print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 1d3c69aeae7e..8cd367eac3a0 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.25.0 +google-cloud-language==0.26.1 diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py index c3205a46e028..d485752b09c5 100644 --- a/language/snippets/cloud-client/v1/snippets.py +++ b/language/snippets/cloud-client/v1/snippets.py @@ -24,118 +24,164 @@ import argparse from google.cloud import language +from google.cloud.language import enums +from google.cloud.language import types import six +# [START def_sentiment_text] def sentiment_text(text): """Detects sentiment in the text.""" - language_client = language.Client() + client = language.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') # Instantiates a plain text document. - document = language_client.document_from_text(text) + # [START migration_document_text] + # [START migration_analyze_sentiment] + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) + # [END migration_document_text] # Detects sentiment in the document. 
You can also analyze HTML with: - # document.doc_type == language.Document.HTML - sentiment = document.analyze_sentiment().sentiment + # document.type == enums.Document.Type.HTML + sentiment = client.analyze_sentiment(document).document_sentiment print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) + # [END migration_analyze_sentiment] +# [END def_sentiment_text] +# [START def_sentiment_file] def sentiment_file(gcs_uri): """Detects sentiment in the file located in Google Cloud Storage.""" - language_client = language.Client() + client = language.LanguageServiceClient() # Instantiates a plain text document. - document = language_client.document_from_url(gcs_uri) + # [START migration_document_gcs_uri] + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) + # [END migration_document_gcs_uri] # Detects sentiment in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - sentiment = document.analyze_sentiment().sentiment + # document.type == enums.Document.Type.HTML + sentiment = client.analyze_sentiment(document).document_sentiment print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) +# [END def_sentiment_file] +# [START def_entities_text] def entities_text(text): """Detects entities in the text.""" - language_client = language.Client() + client = language.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') # Instantiates a plain text document. - document = language_client.document_from_text(text) + # [START migration_analyze_entities] + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) # Detects entities in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - entities = document.analyze_entities().entities + # document.type == enums.Document.Type.HTML + entities = client.analyze_entities(document).entities for entity in entities: print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.entity_type)) + print(u'{:<16}: {}'.format('type', entity.type)) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) + # [END migration_analyze_entities] +# [END def_entities_text] +# [START def_entities_file] def entities_file(gcs_uri): """Detects entities in the file located in Google Cloud Storage.""" - language_client = language.Client() + client = language.LanguageServiceClient() # Instantiates a plain text document. - document = language_client.document_from_url(gcs_uri) + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) # Detects sentiment in the document. 
You can also analyze HTML with: - # document.doc_type == language.Document.HTML - entities = document.analyze_entities().entities + # document.type == enums.Document.Type.HTML + entities = client.analyze_entities(document).entities for entity in entities: print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.entity_type)) + print(u'{:<16}: {}'.format('type', entity.type)) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) +# [END def_entities_file] +# [START def_syntax_text] def syntax_text(text): """Detects syntax in the text.""" - language_client = language.Client() + client = language.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') # Instantiates a plain text document. - document = language_client.document_from_text(text) + # [START migration_analyze_syntax] + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) # Detects syntax in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - tokens = document.analyze_syntax().tokens + # document.type == enums.Document.Type.HTML + tokens = client.analyze_syntax(document).tokens + + # part-of-speech tags from enums.PartOfSpeech.Tag + pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', + 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') for token in tokens: - print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) + print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], + token.text.content)) + # [END migration_analyze_syntax] +# [END def_syntax_text] +# [START def_syntax_file] def syntax_file(gcs_uri): """Detects syntax in the file located in Google Cloud Storage.""" - language_client = language.Client() + client = language.LanguageServiceClient() # Instantiates a plain text document. - document = language_client.document_from_url(gcs_uri) + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) # Detects syntax in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - tokens = document.analyze_syntax().tokens + # document.type == enums.Document.Type.HTML + tokens = client.analyze_syntax(document).tokens + + # part-of-speech tags from enums.PartOfSpeech.Tag + pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', + 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') for token in tokens: - print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) + print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], + token.text.content)) +# [END def_syntax_file] if __name__ == '__main__': diff --git a/language/snippets/cloud-client/v1beta2/README.rst b/language/snippets/cloud-client/v1beta2/README.rst index 3d260b9df902..77df4ffbec94 100644 --- a/language/snippets/cloud-client/v1beta2/README.rst +++ b/language/snippets/cloud-client/v1beta2/README.rst @@ -5,6 +5,10 @@ Google Cloud Natural Language API Python Samples This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. 
+- See the `migration guide`_ for information about migrating to Python client library v0.26.1. + +.. _migration guide: https://cloud.google.com/natural-language/docs/python-client-migration + diff --git a/language/snippets/cloud-client/v1beta2/README.rst.in b/language/snippets/cloud-client/v1beta2/README.rst.in index faf402bfe9c0..1b4855fb4f38 100644 --- a/language/snippets/cloud-client/v1beta2/README.rst.in +++ b/language/snippets/cloud-client/v1beta2/README.rst.in @@ -10,6 +10,12 @@ product: entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. + + - See the `migration guide`_ for information about migrating to Python client library v0.26.1. + + + .. _migration guide: https://cloud.google.com/natural-language/docs/python-client-migration + setup: - auth - install_deps diff --git a/language/snippets/cloud-client/v1beta2/quickstart.py b/language/snippets/cloud-client/v1beta2/quickstart.py index c5a4b9c3ebc0..3cef5fca819d 100644 --- a/language/snippets/cloud-client/v1beta2/quickstart.py +++ b/language/snippets/cloud-client/v1beta2/quickstart.py @@ -18,17 +18,25 @@ def run_quickstart(): # [START language_quickstart] # Imports the Google Cloud client library - from google.cloud import language + # [START beta_import_client] + # [START beta_import] + from google.cloud import language_v1beta2 + from google.cloud.language_v1beta2 import enums + from google.cloud.language_v1beta2 import types + # [END beta_import] - # Instantiates a client with they v1beta2 version - language_client = language.Client(api_version='v1beta2') + # Instantiates a client with the v1beta2 version + client = language_v1beta2.LanguageServiceClient() + # [END beta_import_client] # The text to analyze - text = 'Hallo Welt!' - document = language_client.document_from_text(text, language='DE') - + text = u'Hallo Welt!' 
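    # Illustrative aside, not part of the original patch: the generated GAPIC
    # methods also accept plain dicts wherever a protobuf message is expected
    # and coerce them for you, so the Document built just below could
    # equivalently be passed as (hedged sketch, same fields):
    #
    #     document = {'content': text,
    #                 'type': enums.Document.Type.PLAIN_TEXT,
    #                 'language': 'de'}
    #     sentiment = client.analyze_sentiment(document).document_sentiment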
+ document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT, + language='de') # Detects the sentiment of the text - sentiment = document.analyze_sentiment().sentiment + sentiment = client.analyze_sentiment(document).document_sentiment print('Text: {}'.format(text)) print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) diff --git a/language/snippets/cloud-client/v1beta2/requirements.txt b/language/snippets/cloud-client/v1beta2/requirements.txt index d44360febc26..8cd367eac3a0 100644 --- a/language/snippets/cloud-client/v1beta2/requirements.txt +++ b/language/snippets/cloud-client/v1beta2/requirements.txt @@ -1,2 +1 @@ -gapic-google-cloud-language-v1beta2==0.15.3 -google-cloud-language==0.25.0 +google-cloud-language==0.26.1 diff --git a/language/snippets/cloud-client/v1beta2/snippets.py b/language/snippets/cloud-client/v1beta2/snippets.py index 02d0d8e5b314..af7836ba3c64 100644 --- a/language/snippets/cloud-client/v1beta2/snippets.py +++ b/language/snippets/cloud-client/v1beta2/snippets.py @@ -24,64 +24,69 @@ import argparse import sys -from google.cloud import language -from google.cloud.gapic.language.v1beta2 import enums -from google.cloud.gapic.language.v1beta2 import language_service_client -from google.cloud.proto.language.v1beta2 import language_service_pb2 +from google.cloud import language_v1beta2 +from google.cloud.language_v1beta2 import enums +from google.cloud.language_v1beta2 import types import six def sentiment_text(text): """Detects sentiment in the text.""" - language_client = language.Client(api_version='v1beta2') + client = language_v1beta2.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') # Instantiates a plain text document. - document = language_client.document_from_text(text) + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) # Detects sentiment in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - sentiment = document.analyze_sentiment().sentiment + # document.type == enums.Document.Type.HTML + sentiment = client.analyze_sentiment(document).document_sentiment - print(u'Score: {}'.format(sentiment.score)) - print(u'Magnitude: {}'.format(sentiment.magnitude)) + print('Score: {}'.format(sentiment.score)) + print('Magnitude: {}'.format(sentiment.magnitude)) def sentiment_file(gcs_uri): """Detects sentiment in the file located in Google Cloud Storage.""" - language_client = language.Client(api_version='v1beta2') + client = language_v1beta2.LanguageServiceClient() # Instantiates a plain text document. - document = language_client.document_from_gcs_url(gcs_uri) + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) # Detects sentiment in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - sentiment = document.analyze_sentiment().sentiment + # document.type == enums.Document.Type.HTML + sentiment = client.analyze_sentiment(document).document_sentiment - print(u'Score: {}'.format(sentiment.score)) - print(u'Magnitude: {}'.format(sentiment.magnitude)) + print('Score: {}'.format(sentiment.score)) + print('Magnitude: {}'.format(sentiment.magnitude)) def entities_text(text): """Detects entities in the text.""" - language_client = language.Client(api_version='v1beta2') + client = language_v1beta2.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') # Instantiates a plain text document. 
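    # Illustrative aside, not part of the original patch: the
    # isinstance(text, six.binary_type) guard used throughout these snippets
    # exists because argparse hands these functions bytes under Python 2 but
    # str under Python 3; decoding only when given bytes keeps one code path
    # for both. A minimal standalone equivalent (the helper name is ours):
    #
    #     import six
    #
    #     def ensure_unicode(value, encoding='utf-8'):
    #         """Return value as text on both Python 2 and Python 3."""
    #         if isinstance(value, six.binary_type):
    #             return value.decode(encoding)
    #         return value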
- document = language_client.document_from_text(text) + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) # Detects entities in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - entities = document.analyze_entities().entities + # document.type == enums.Document.Type.HTML + entities = client.analyze_entities(document).entities for entity in entities: - print(u'=' * 20) + print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.entity_type)) + print(u'{:<16}: {}'.format('type', entity.type)) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', @@ -90,75 +95,92 @@ def entities_text(text): def entities_file(gcs_uri): """Detects entities in the file located in Google Cloud Storage.""" - language_client = language.Client(api_version='v1beta2') + client = language_v1beta2.LanguageServiceClient() # Instantiates a plain text document. - document = language_client.document_from_gcs_url(gcs_uri) + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) # Detects sentiment in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - entities = document.analyze_entities().entities + # document.type == enums.Document.Type.HTML + entities = client.analyze_entities(document).entities for entity in entities: print('=' * 20) - print('{:<16}: {}'.format('name', entity.name)) - print('{:<16}: {}'.format('type', entity.entity_type)) - print('{:<16}: {}'.format('metadata', entity.metadata)) - print('{:<16}: {}'.format('salience', entity.salience)) - print('{:<16}: {}'.format('wikipedia_url', + print(u'{:<16}: {}'.format('name', entity.name)) + print(u'{:<16}: {}'.format('type', entity.type)) + print(u'{:<16}: {}'.format('metadata', entity.metadata)) + print(u'{:<16}: {}'.format('salience', entity.salience)) + print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) def syntax_text(text): """Detects syntax in the text.""" - language_client = language.Client(api_version='v1beta2') + client = language_v1beta2.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') # Instantiates a plain text document. - document = language_client.document_from_text(text) + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) # Detects syntax in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - tokens = document.analyze_syntax().tokens + # document.type == enums.Document.Type.HTML + tokens = client.analyze_syntax(document).tokens + + # part-of-speech tags from enums.PartOfSpeech.Tag + pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', + 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') for token in tokens: - print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) + print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], + token.text.content)) def syntax_file(gcs_uri): """Detects syntax in the file located in Google Cloud Storage.""" - language_client = language.Client(api_version='v1beta2') + client = language_v1beta2.LanguageServiceClient() # Instantiates a plain text document. 
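    # Illustrative aside, not part of the original patch: `content` and
    # `gcs_content_uri` are alternative sources on the same Document message
    # (a protobuf oneof), so exactly one of them is set per request -- inline
    # text in the earlier functions, a Cloud Storage URI here, e.g.:
    #
    #     types.Document(gcs_content_uri='gs://bucket/file.txt',
    #                    type=enums.Document.Type.PLAIN_TEXT)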
- document = language_client.document_from_gcs_url(gcs_uri) + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) # Detects syntax in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - tokens = document.analyze_syntax().tokens + # document.type == enums.Document.Type.HTML + tokens = client.analyze_syntax(document).tokens + + # part-of-speech tags from enums.PartOfSpeech.Tag + pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', + 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') for token in tokens: - print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) + print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], + token.text.content)) +# [START def_entity_sentiment_text] def entity_sentiment_text(text): """Detects entity sentiment in the provided text.""" - language_client = language_service_client.LanguageServiceClient() - document = language_service_pb2.Document() + client = language_v1beta2.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') - document.content = text.encode('utf-8') - document.type = enums.Document.Type.PLAIN_TEXT + document = types.Document( + content=text.encode('utf-8'), + type=enums.Document.Type.PLAIN_TEXT) + # Pass in encoding type to get useful offsets in the response. encoding = enums.EncodingType.UTF32 if sys.maxunicode == 65535: encoding = enums.EncodingType.UTF16 - result = language_client.analyze_entity_sentiment( - document, encoding) + result = client.analyze_entity_sentiment(document, encoding) for entity in result.entities: print('Mentions: ') @@ -171,22 +193,23 @@ def entity_sentiment_text(text): print(u' Type : {}'.format(mention.type)) print(u'Salience: {}'.format(entity.salience)) print(u'Sentiment: {}\n'.format(entity.sentiment)) +# [END def_entity_sentiment_text] def entity_sentiment_file(gcs_uri): """Detects entity sentiment in a Google Cloud Storage file.""" - language_client = language_service_client.LanguageServiceClient() - document = language_service_pb2.Document() + client = language_v1beta2.LanguageServiceClient() - document.gcs_content_uri = gcs_uri - document.type = enums.Document.Type.PLAIN_TEXT + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) + # Pass in encoding type to get useful offsets in the response. 
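    # Illustrative aside, not part of the original patch, on the
    # sys.maxunicode test just below: "narrow" interpreter builds
    # (sys.maxunicode == 65535) index strings by UTF-16 code units, while
    # "wide" builds index by full code points, so choosing UTF16 vs UTF32
    # makes the API's offsets line up with native string slicing. A hedged
    # standalone sketch (the helper name is ours):
    #
    #     import sys
    #     from google.cloud.language_v1beta2 import enums
    #
    #     def native_encoding_type():
    #         """Return the EncodingType matching this build's indexing."""
    #         if sys.maxunicode == 65535:  # narrow build: UTF-16 units
    #             return enums.EncodingType.UTF16
    #         return enums.EncodingType.UTF32  # wide build: code points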
encoding = enums.EncodingType.UTF32 if sys.maxunicode == 65535: encoding = enums.EncodingType.UTF16 - result = language_client.analyze_entity_sentiment( - document, encoding) + result = client.analyze_entity_sentiment(document, encoding) for entity in result.entities: print(u'Name: "{}"'.format(entity.name)) diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 1d3c69aeae7e..8cd367eac3a0 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.25.0 +google-cloud-language==0.26.1 diff --git a/language/snippets/sentiment/sentiment_analysis.py b/language/snippets/sentiment/sentiment_analysis.py index c574c31827be..8ac8575b08ec 100644 --- a/language/snippets/sentiment/sentiment_analysis.py +++ b/language/snippets/sentiment/sentiment_analysis.py @@ -18,12 +18,15 @@ import argparse from google.cloud import language +from google.cloud.language import enums +from google.cloud.language import types # [END sentiment_tutorial_import] +# [START def_print_result] def print_result(annotations): - score = annotations.sentiment.score - magnitude = annotations.sentiment.magnitude + score = annotations.document_sentiment.score + magnitude = annotations.document_sentiment.magnitude for index, sentence in enumerate(annotations.sentences): sentence_sentiment = sentence.sentiment.score @@ -33,27 +36,26 @@ def print_result(annotations): print('Overall Sentiment: score of {} with magnitude of {}'.format( score, magnitude)) return 0 - - print('Sentiment: score of {} with magnitude of {}'.format( - score, magnitude)) - return 0 +# [END def_print_result] +# [START def_analyze] def analyze(movie_review_filename): """Run a sentiment analysis request on text within a passed filename.""" - language_client = language.Client() + client = language.LanguageServiceClient() with open(movie_review_filename, 'r') as review_file: # Instantiates a plain text document. - document = language_client.document_from_html(review_file.read()) + content = review_file.read() - # Detects sentiment in the document. - annotations = document.annotate_text(include_sentiment=True, - include_syntax=False, - include_entities=False) + document = types.Document( + content=content, + type=enums.Document.Type.PLAIN_TEXT) + annotations = client.analyze_sentiment(document=document) - # Print the results - print_result(annotations) + # Print the results + print_result(annotations) +# [END def_analyze] if __name__ == '__main__': From 01ff509a67328e0c393150cf753371bc2ae48515 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 3 Aug 2017 09:05:36 -0700 Subject: [PATCH 077/323] Auto-update dependencies. 
[(#1048)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1048) --- language/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 992307a00727..bd9870bd0ac9 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.2 -requests==2.18.2 +requests==2.18.3 From 58c697a8d62f9f19fd852f74cf9cb9c1ed1e7ecc Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Thu, 3 Aug 2017 16:48:10 -0700 Subject: [PATCH 078/323] move region tags so that the beta page only includes code from the relevant file --- language/snippets/cloud-client/v1beta2/quickstart.py | 4 ---- language/snippets/cloud-client/v1beta2/snippets.py | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/cloud-client/v1beta2/quickstart.py b/language/snippets/cloud-client/v1beta2/quickstart.py index 3cef5fca819d..b19d11b7bc17 100644 --- a/language/snippets/cloud-client/v1beta2/quickstart.py +++ b/language/snippets/cloud-client/v1beta2/quickstart.py @@ -18,16 +18,12 @@ def run_quickstart(): # [START language_quickstart] # Imports the Google Cloud client library - # [START beta_import_client] - # [START beta_import] from google.cloud import language_v1beta2 from google.cloud.language_v1beta2 import enums from google.cloud.language_v1beta2 import types - # [END beta_import] # Instantiates a client with the v1beta2 version client = language_v1beta2.LanguageServiceClient() - # [END beta_import_client] # The text to analyze text = u'Hallo Welt!' diff --git a/language/snippets/cloud-client/v1beta2/snippets.py b/language/snippets/cloud-client/v1beta2/snippets.py index af7836ba3c64..0ea7352aeb95 100644 --- a/language/snippets/cloud-client/v1beta2/snippets.py +++ b/language/snippets/cloud-client/v1beta2/snippets.py @@ -24,9 +24,11 @@ import argparse import sys +# [START beta_import] from google.cloud import language_v1beta2 from google.cloud.language_v1beta2 import enums from google.cloud.language_v1beta2 import types +# [END beta_import] import six @@ -166,7 +168,9 @@ def syntax_file(gcs_uri): # [START def_entity_sentiment_text] def entity_sentiment_text(text): """Detects entity sentiment in the provided text.""" + # [START beta_client] client = language_v1beta2.LanguageServiceClient() + # [END beta_client] if isinstance(text, six.binary_type): text = text.decode('utf-8') From 104b4efa381ca876dd38399a3b9b5c3af5c4de89 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 7 Aug 2017 10:04:55 -0700 Subject: [PATCH 079/323] Auto-update dependencies. [(#1055)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1055) * Auto-update dependencies. * Explicitly use latest bigtable client Change-Id: Id71e9e768f020730e4ca9514a0d7ebaa794e7d9e * Revert language update for now Change-Id: I8867f154e9a5aae00d0047c9caf880e5e8f50c53 * Remove pdb. 
smh Change-Id: I5ff905fadc026eebbcd45512d4e76e003e3b2b43 --- language/snippets/cloud-client/v1beta2/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/language/snippets/cloud-client/v1beta2/requirements.txt b/language/snippets/cloud-client/v1beta2/requirements.txt index 8cd367eac3a0..743bbe792126 100644 --- a/language/snippets/cloud-client/v1beta2/requirements.txt +++ b/language/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.26.1 +google-cloud-language==0.27.0 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 8cd367eac3a0..743bbe792126 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.26.1 +google-cloud-language==0.27.0 From 0b1a10de1e08d69e8efcb049c2b464949f19ef8d Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Mon, 7 Aug 2017 12:35:23 -0700 Subject: [PATCH 080/323] Update language test case to deal with changing server output Change-Id: Id4e773d2fed4a8934876535987e2c703a8504c26 --- language/snippets/cloud-client/v1/snippets_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/language/snippets/cloud-client/v1/snippets_test.py b/language/snippets/cloud-client/v1/snippets_test.py index 080d5dd5d5bb..8bbdaf9deb32 100644 --- a/language/snippets/cloud-client/v1/snippets_test.py +++ b/language/snippets/cloud-client/v1/snippets_test.py @@ -21,15 +21,15 @@ def test_sentiment_text(capsys): - snippets.sentiment_text('President Obama is speaking at the White House.') + snippets.sentiment_text('No! God please, no!') out, _ = capsys.readouterr() - assert 'Score: 0.2' in out + assert 'Score: ' in out def test_sentiment_file(capsys): snippets.sentiment_file(TEST_FILE_URL) out, _ = capsys.readouterr() - assert 'Score: 0.2' in out + assert 'Score: ' in out def test_entities_text(capsys): From 7e10f05231ee6e28cd4e559f3a951abae4ede0c2 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 8 Aug 2017 08:51:01 -0700 Subject: [PATCH 081/323] Auto-update dependencies. 
[(#1057)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1057) --- language/snippets/cloud-client/v1/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 8cd367eac3a0..743bbe792126 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.26.1 +google-cloud-language==0.27.0 From 31d3b5bd0dd94846afdf7c96b38f9c90d623d877 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Tue, 8 Aug 2017 15:55:47 -0700 Subject: [PATCH 082/323] show entity type name [(#1062)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1062) * show entity type name * update v1beta2 * correct indent --- language/snippets/cloud-client/v1/snippets.py | 12 ++++++++++-- language/snippets/cloud-client/v1beta2/snippets.py | 12 ++++++++++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py index d485752b09c5..704c6347eac0 100644 --- a/language/snippets/cloud-client/v1/snippets.py +++ b/language/snippets/cloud-client/v1/snippets.py @@ -94,10 +94,14 @@ def entities_text(text): # document.type == enums.Document.Type.HTML entities = client.analyze_entities(document).entities + # entity types from enums.Entity.Type + entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', + 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') + for entity in entities: print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.type)) + print(u'{:<16}: {}'.format('type', entity_type[entity.type])) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', @@ -120,10 +124,14 @@ def entities_file(gcs_uri): # document.type == enums.Document.Type.HTML entities = client.analyze_entities(document).entities + # entity types from enums.Entity.Type + entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', + 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') + for entity in entities: print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.type)) + print(u'{:<16}: {}'.format('type', entity_type[entity.type])) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', diff --git a/language/snippets/cloud-client/v1beta2/snippets.py b/language/snippets/cloud-client/v1beta2/snippets.py index 0ea7352aeb95..0ea15f7fc42d 100644 --- a/language/snippets/cloud-client/v1beta2/snippets.py +++ b/language/snippets/cloud-client/v1beta2/snippets.py @@ -85,10 +85,14 @@ def entities_text(text): # document.type == enums.Document.Type.HTML entities = client.analyze_entities(document).entities + # entity types from enums.Entity.Type + entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', + 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') + for entity in entities: print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.type)) + print(u'{:<16}: {}'.format('type', entity_type[entity.type])) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: 
{}'.format('wikipedia_url', @@ -108,10 +112,14 @@ def entities_file(gcs_uri): # document.type == enums.Document.Type.HTML entities = client.analyze_entities(document).entities + # entity types from enums.Entity.Type + entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', + 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') + for entity in entities: print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.type)) + print(u'{:<16}: {}'.format('type', entity_type[entity.type])) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', From ebba1f80113d9acfb3b43bfc8992a9701ce0a321 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 16 Aug 2017 09:34:13 -0700 Subject: [PATCH 083/323] Auto-update dependencies. [(#1073)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1073) --- language/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index bd9870bd0ac9..b072771d522a 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.2 -requests==2.18.3 +requests==2.18.4 From 0ca3eec75ec215cdf3a2b6f5829f23870babd756 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 16 Aug 2017 09:51:07 -0700 Subject: [PATCH 084/323] Fix flaky movie_nl tests Change-Id: I4922637173048627f38b507588a4f30a5d490212 --- language/snippets/movie_nl/main_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/movie_nl/main_test.py b/language/snippets/movie_nl/main_test.py index 927639eb2b21..7e33cefd3e30 100644 --- a/language/snippets/movie_nl/main_test.py +++ b/language/snippets/movie_nl/main_test.py @@ -70,18 +70,18 @@ def test_process_movie_reviews(): entities = [json.loads(entity) for entity in entities] # assert sentiments - assert sentiments[0].get('sentiment') == 0.9 + assert sentiments[0].get('sentiment') > 0 assert sentiments[0].get('label') == 1 - assert sentiments[1].get('sentiment') == 0.9 + assert sentiments[1].get('sentiment') > 0 assert sentiments[1].get('label') == 1 # assert entities assert len(entities) == 1 assert entities[0].get('name') == 'Tom Cruise' assert (entities[0].get('wiki_url') == - 'http://en.wikipedia.org/wiki/Tom_Cruise') - assert entities[0].get('sentiment') == 1.8 + 'https://en.wikipedia.org/wiki/Tom_Cruise') + assert entities[0].get('sentiment') > 0 def test_rank_positive_entities(capsys): From fe9be31fd422240893a26fa03fc945529e1c9f33 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 29 Aug 2017 16:53:02 -0700 Subject: [PATCH 085/323] Auto-update dependencies. [(#1093)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1093) * Auto-update dependencies. 
* Fix storage notification poll sample Change-Id: I6afbc79d15e050531555e4c8e51066996717a0f3 * Fix spanner samples Change-Id: I40069222c60d57e8f3d3878167591af9130895cb * Drop coverage because it's not useful Change-Id: Iae399a7083d7866c3c7b9162d0de244fbff8b522 * Try again to fix flaky logging test Change-Id: I6225c074701970c17c426677ef1935bb6d7e36b4 --- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/cloud-client/v1beta2/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 743bbe792126..0aa7a714776c 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.27.0 +google-cloud-language==0.28.0 diff --git a/language/snippets/cloud-client/v1beta2/requirements.txt b/language/snippets/cloud-client/v1beta2/requirements.txt index 743bbe792126..0aa7a714776c 100644 --- a/language/snippets/cloud-client/v1beta2/requirements.txt +++ b/language/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.27.0 +google-cloud-language==0.28.0 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 743bbe792126..0aa7a714776c 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.27.0 +google-cloud-language==0.28.0 From 0c399600e344183ed62cf9fbae893fc753b3c7df Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 30 Aug 2017 10:15:58 -0700 Subject: [PATCH 086/323] Auto-update dependencies. [(#1094)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1094) * Auto-update dependencies. 
* Relax assertions in the ocr_nl sample Change-Id: I6d37e5846a8d6dd52429cb30d501f448c52cbba1 * Drop unused logging apiary samples Change-Id: I545718283773cb729a5e0def8a76ebfa40829d51 --- language/snippets/api/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- language/snippets/ocr_nl/main_test.py | 2 -- language/snippets/ocr_nl/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 2 +- language/snippets/tutorial/requirements.txt | 2 +- 6 files changed, 5 insertions(+), 7 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 4f77d6936d70..28ef89127f4b 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.2 +google-api-python-client==1.6.3 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index b072771d522a..69ae9a1934bc 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.6.2 +google-api-python-client==1.6.3 requests==2.18.4 diff --git a/language/snippets/ocr_nl/main_test.py b/language/snippets/ocr_nl/main_test.py index afaf2e16576c..e4bf2b5174fb 100755 --- a/language/snippets/ocr_nl/main_test.py +++ b/language/snippets/ocr_nl/main_test.py @@ -66,7 +66,6 @@ def test_text_returns_entities(): etype, ename, salience, wurl = text_analyzer.extract_entity_info( entities[0]) assert ename == 'holmes' - assert wurl == 'http://en.wikipedia.org/wiki/Sherlock_Holmes' def test_entities_list(): @@ -80,7 +79,6 @@ def test_entities_list(): etype, ename, salience, wurl = text_analyzer.extract_entity_info( entities[0]) assert ename == 'bennet' - assert wurl == 'http://en.wikipedia.org/wiki/Mr_Bennet' def test_main(tmpdir, capsys): diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index 4f77d6936d70..28ef89127f4b 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.2 +google-api-python-client==1.6.3 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index 4f77d6936d70..28ef89127f4b 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.2 +google-api-python-client==1.6.3 diff --git a/language/snippets/tutorial/requirements.txt b/language/snippets/tutorial/requirements.txt index 4f77d6936d70..28ef89127f4b 100644 --- a/language/snippets/tutorial/requirements.txt +++ b/language/snippets/tutorial/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.2 +google-api-python-client==1.6.3 From 530dd43e3e6fcbc059caa3565153d2d65fabf9ac Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Fri, 15 Sep 2017 13:36:20 -0700 Subject: [PATCH 087/323] Language classify [(#1095)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1095) * add classify text samples and tests * use longer text * move entity sentiment to v1 * flake * year when first written * year first written --- language/snippets/cloud-client/v1/snippets.py | 74 ++++++++++++++++ .../snippets/cloud-client/v1/snippets_test.py | 23 ++++- .../v1beta2/resources/android_text.txt | 1 + .../snippets/cloud-client/v1beta2/snippets.py | 86 +++++++------------ .../cloud-client/v1beta2/snippets_test.py | 26 
+++--- 5 files changed, 141 insertions(+), 69 deletions(-) create mode 100644 language/snippets/cloud-client/v1beta2/resources/android_text.txt diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py index 704c6347eac0..e13fc7dd6c8b 100644 --- a/language/snippets/cloud-client/v1/snippets.py +++ b/language/snippets/cloud-client/v1/snippets.py @@ -22,10 +22,12 @@ """ import argparse +import sys from google.cloud import language from google.cloud.language import enums from google.cloud.language import types + import six @@ -192,12 +194,80 @@ def syntax_file(gcs_uri): # [END def_syntax_file] +# [START def_entity_sentiment_text] +def entity_sentiment_text(text): + """Detects entity sentiment in the provided text.""" + client = language.LanguageServiceClient() + + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + + document = types.Document( + content=text.encode('utf-8'), + type=enums.Document.Type.PLAIN_TEXT) + + # Detect and send native Python encoding to receive correct word offsets. + encoding = enums.EncodingType.UTF32 + if sys.maxunicode == 65535: + encoding = enums.EncodingType.UTF16 + + result = client.analyze_entity_sentiment(document, encoding) + + for entity in result.entities: + print('Mentions: ') + print(u'Name: "{}"'.format(entity.name)) + for mention in entity.mentions: + print(u' Begin Offset : {}'.format(mention.text.begin_offset)) + print(u' Content : {}'.format(mention.text.content)) + print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(u' Sentiment : {}'.format(mention.sentiment.score)) + print(u' Type : {}'.format(mention.type)) + print(u'Salience: {}'.format(entity.salience)) + print(u'Sentiment: {}\n'.format(entity.sentiment)) +# [END def_entity_sentiment_text] + + +def entity_sentiment_file(gcs_uri): + """Detects entity sentiment in a Google Cloud Storage file.""" + client = language.LanguageServiceClient() + + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) + + # Detect and send native Python encoding to receive correct word offsets. 
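    # Illustrative aside, not part of the original patch: because the
    # encoding chosen on the next lines matches the interpreter's own string
    # indexing, mention.text.begin_offset indexes directly into the original
    # Python string for the inline-text variant above, i.e. (hypothetical
    # variable names, for illustration only):
    #
    #     start = mention.text.begin_offset
    #     end = start + len(mention.text.content)
    #     assert text[start:end] == mention.text.content
    #
    # The 'foo→bar' test added later in this patch relies on exactly this,
    # expecting 'Begin Offset : 4' for the mention of 'bar'.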
+ encoding = enums.EncodingType.UTF32 + if sys.maxunicode == 65535: + encoding = enums.EncodingType.UTF16 + + result = client.analyze_entity_sentiment(document, encoding) + + for entity in result.entities: + print(u'Name: "{}"'.format(entity.name)) + for mention in entity.mentions: + print(u' Begin Offset : {}'.format(mention.text.begin_offset)) + print(u' Content : {}'.format(mention.text.content)) + print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(u' Sentiment : {}'.format(mention.sentiment.score)) + print(u' Type : {}'.format(mention.type)) + print(u'Salience: {}'.format(entity.salience)) + print(u'Sentiment: {}\n'.format(entity.sentiment)) + + if __name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) subparsers = parser.add_subparsers(dest='command') + sentiment_entities_text_parser = subparsers.add_parser( + 'sentiment-entities-text', help=entity_sentiment_text.__doc__) + sentiment_entities_text_parser.add_argument('text') + + sentiment_entities_file_parser = subparsers.add_parser( + 'sentiment-entities-file', help=entity_sentiment_file.__doc__) + sentiment_entities_file_parser.add_argument('gcs_uri') + sentiment_text_parser = subparsers.add_parser( 'sentiment-text', help=sentiment_text.__doc__) sentiment_text_parser.add_argument('text') @@ -236,3 +306,7 @@ def syntax_file(gcs_uri): syntax_text(args.text) elif args.command == 'syntax-file': syntax_file(args.gcs_uri) + elif args.command == 'sentiment-entities-text': + entity_sentiment_text(args.text) + elif args.command == 'sentiment-entities-file': + entity_sentiment_file(args.gcs_uri) diff --git a/language/snippets/cloud-client/v1/snippets_test.py b/language/snippets/cloud-client/v1/snippets_test.py index 8bbdaf9deb32..168701dc6667 100644 --- a/language/snippets/cloud-client/v1/snippets_test.py +++ b/language/snippets/cloud-client/v1/snippets_test.py @@ -1,4 +1,5 @@ -# Copyright 2016 Google, Inc. +# -*- coding: utf-8 -*- +# Copyright 2017 Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -56,3 +57,23 @@ def test_syntax_file(capsys): snippets.syntax_file(TEST_FILE_URL) out, _ = capsys.readouterr() assert 'NOUN: President' in out + + +def test_sentiment_entities_text(capsys): + snippets.entity_sentiment_text( + 'President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'Content : White House' in out + + +def test_sentiment_entities_file(capsys): + snippets.entity_sentiment_file(TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'Content : White House' in out + + +def test_sentiment_entities_utf(capsys): + snippets.entity_sentiment_text( + 'foo→bar') + out, _ = capsys.readouterr() + assert 'Begin Offset : 4' in out diff --git a/language/snippets/cloud-client/v1beta2/resources/android_text.txt b/language/snippets/cloud-client/v1beta2/resources/android_text.txt new file mode 100644 index 000000000000..c05c452dc008 --- /dev/null +++ b/language/snippets/cloud-client/v1beta2/resources/android_text.txt @@ -0,0 +1 @@ +Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets. 
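# Illustrative sketch, not part of the original patch: the classify_text
# call this commit introduces, reduced to a minimal standalone sample.
# classify_text rejects very short inputs (on the order of twenty tokens
# minimum, hence the long fixture file just above), and in this series it
# is exposed through the v1beta2 surface.
from google.cloud import language_v1beta2
from google.cloud.language_v1beta2 import enums
from google.cloud.language_v1beta2 import types


def classify(text):
    """Print the content categories the API assigns to text."""
    client = language_v1beta2.LanguageServiceClient()
    document = types.Document(
        content=text, type=enums.Document.Type.PLAIN_TEXT)
    # Each category carries a taxonomy path and a confidence score.
    for category in client.classify_text(document).categories:
        print(u'{}: {}'.format(category.name, category.confidence))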
diff --git a/language/snippets/cloud-client/v1beta2/snippets.py b/language/snippets/cloud-client/v1beta2/snippets.py index 0ea15f7fc42d..3ccc2933cd7f 100644 --- a/language/snippets/cloud-client/v1beta2/snippets.py +++ b/language/snippets/cloud-client/v1beta2/snippets.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2017 Google, Inc. +# Copyright 2016 Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,7 +22,6 @@ """ import argparse -import sys # [START beta_import] from google.cloud import language_v1beta2 @@ -173,9 +172,9 @@ def syntax_file(gcs_uri): token.text.content)) -# [START def_entity_sentiment_text] -def entity_sentiment_text(text): - """Detects entity sentiment in the provided text.""" +# [START def_classify_text] +def classify_text(text): + """Classifies the provided text.""" # [START beta_client] client = language_v1beta2.LanguageServiceClient() # [END beta_client] @@ -187,52 +186,31 @@ def entity_sentiment_text(text): content=text.encode('utf-8'), type=enums.Document.Type.PLAIN_TEXT) - # Pass in encoding type to get useful offsets in the response. - encoding = enums.EncodingType.UTF32 - if sys.maxunicode == 65535: - encoding = enums.EncodingType.UTF16 - - result = client.analyze_entity_sentiment(document, encoding) - - for entity in result.entities: - print('Mentions: ') - print(u'Name: "{}"'.format(entity.name)) - for mention in entity.mentions: - print(u' Begin Offset : {}'.format(mention.text.begin_offset)) - print(u' Content : {}'.format(mention.text.content)) - print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(u' Sentiment : {}'.format(mention.sentiment.score)) - print(u' Type : {}'.format(mention.type)) - print(u'Salience: {}'.format(entity.salience)) - print(u'Sentiment: {}\n'.format(entity.sentiment)) -# [END def_entity_sentiment_text] - - -def entity_sentiment_file(gcs_uri): - """Detects entity sentiment in a Google Cloud Storage file.""" + categories = client.classify_text(document).categories + + for category in categories: + print(u'=' * 20) + print(u'{:<16}: {}'.format('name', category.name)) + print(u'{:<16}: {}'.format('confidence', category.confidence)) +# [END def_classify_text] + + +# [START def_classify_file] +def classify_file(gcs_uri): + """Classifies the text in a Google Cloud Storage file.""" client = language_v1beta2.LanguageServiceClient() document = types.Document( gcs_content_uri=gcs_uri, type=enums.Document.Type.PLAIN_TEXT) - # Pass in encoding type to get useful offsets in the response. 
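    # Editorial note, hedged from this commit's own message ("move entity
    # sentiment to v1"): the deletions in this hunk drop entity sentiment
    # from the v1beta2 sample because the feature now lives in
    # v1/snippets.py (see the v1 hunks earlier in this patch);
    # classify_file takes its place here.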
- encoding = enums.EncodingType.UTF32 - if sys.maxunicode == 65535: - encoding = enums.EncodingType.UTF16 - - result = client.analyze_entity_sentiment(document, encoding) + categories = client.classify_text(document).categories - for entity in result.entities: - print(u'Name: "{}"'.format(entity.name)) - for mention in entity.mentions: - print(u' Begin Offset : {}'.format(mention.text.begin_offset)) - print(u' Content : {}'.format(mention.text.content)) - print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(u' Sentiment : {}'.format(mention.sentiment.score)) - print(u' Type : {}'.format(mention.type)) - print(u'Salience: {}'.format(entity.salience)) - print(u'Sentiment: {}\n'.format(entity.sentiment)) + for category in categories: + print(u'=' * 20) + print(u'{:<16}: {}'.format('name', category.name)) + print(u'{:<16}: {}'.format('confidence', category.confidence)) +# [END def_classify_file] if __name__ == '__main__': @@ -241,13 +219,13 @@ def entity_sentiment_file(gcs_uri): formatter_class=argparse.RawDescriptionHelpFormatter) subparsers = parser.add_subparsers(dest='command') - sentiment_entities_text_parser = subparsers.add_parser( - 'sentiment-entities-text', help=entity_sentiment_text.__doc__) - sentiment_entities_text_parser.add_argument('text') + classify_text_parser = subparsers.add_parser( + 'classify-text', help=classify_text.__doc__) + classify_text_parser.add_argument('text') - sentiment_entities_file_parser = subparsers.add_parser( - 'sentiment-entities-file', help=entity_sentiment_file.__doc__) - sentiment_entities_file_parser.add_argument('gcs_uri') + classify_text_parser = subparsers.add_parser( + 'classify-file', help=classify_file.__doc__) + classify_text_parser.add_argument('gcs_uri') sentiment_text_parser = subparsers.add_parser( 'sentiment-text', help=sentiment_text.__doc__) @@ -287,7 +265,7 @@ def entity_sentiment_file(gcs_uri): syntax_text(args.text) elif args.command == 'syntax-file': syntax_file(args.gcs_uri) - elif args.command == 'sentiment-entities-text': - entity_sentiment_text(args.text) - elif args.command == 'sentiment-entities-file': - entity_sentiment_file(args.gcs_uri) + elif args.command == 'classify-text': + classify_text(args.text) + elif args.command == 'classify-file': + classify_file(args.gcs_uri) diff --git a/language/snippets/cloud-client/v1beta2/snippets_test.py b/language/snippets/cloud-client/v1beta2/snippets_test.py index e6db221780b0..d440136b5010 100644 --- a/language/snippets/cloud-client/v1beta2/snippets_test.py +++ b/language/snippets/cloud-client/v1beta2/snippets_test.py @@ -19,6 +19,7 @@ BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] TEST_FILE_URL = 'gs://{}/text.txt'.format(BUCKET) +LONG_TEST_FILE_URL = 'gs://{}/android_text.txt'.format(BUCKET) def test_sentiment_text(capsys): @@ -68,21 +69,18 @@ def test_syntax_file(capsys): assert 'NOUN: President' in out -def test_sentiment_entities_text(capsys): - snippets.entity_sentiment_text( - 'President Obama is speaking at the White House.') +def test_classify_text(capsys): + snippets.classify_text( + 'Android is a mobile operating system developed by Google, ' + 'based on the Linux kernel and designed primarily for touchscreen ' + 'mobile devices such as smartphones and tablets.') out, _ = capsys.readouterr() - assert 'Content : White House' in out - - -def test_sentiment_entities_file(capsys): - snippets.entity_sentiment_file(TEST_FILE_URL) - out, _ = capsys.readouterr() - assert 'Content : White House' in out + assert 'name' in out + assert '/Computers & Electronics' in out 
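# Editorial aside, not part of the original patch: note the assertion
# style -- the new tests check field labels and the taxonomy path
# ('/Computers & Electronics') rather than exact confidence values, which
# keeps them stable as the model evolves; the same reasoning loosened
# 'Score: 0.2' to 'Score: ' in patch 080 earlier in this series.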
-def test_sentiment_entities_utf(capsys): - snippets.entity_sentiment_text( - 'foo→bar') +def test_classify_file(capsys): + snippets.classify_file(LONG_TEST_FILE_URL) out, _ = capsys.readouterr() - assert 'Begin Offset : 4' in out + assert 'name' in out + assert '/Computers & Electronics' in out From d73bec2fcc8d7a34a2ce4f3d618d8e15899fc1db Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Fri, 15 Sep 2017 13:49:00 -0700 Subject: [PATCH 088/323] Client version update [(#1117)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1117) * correct client version * update client version --- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/cloud-client/v1beta2/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 0aa7a714776c..afc8ed0adf2c 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.28.0 +google-cloud-language==0.29.0 diff --git a/language/snippets/cloud-client/v1beta2/requirements.txt b/language/snippets/cloud-client/v1beta2/requirements.txt index 0aa7a714776c..afc8ed0adf2c 100644 --- a/language/snippets/cloud-client/v1beta2/requirements.txt +++ b/language/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.28.0 +google-cloud-language==0.29.0 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 0aa7a714776c..afc8ed0adf2c 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.28.0 +google-cloud-language==0.29.0 From 6ad2fd6ca4a2827d09000897e33ebfea35ed7b73 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Mon, 18 Sep 2017 11:04:05 -0700 Subject: [PATCH 089/323] Update all generated readme auth instructions [(#1121)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1121) Change-Id: I03b5eaef8b17ac3dc3c0339fd2c7447bd3e11bd2 --- language/snippets/api/README.rst | 32 +++---------------- language/snippets/cloud-client/v1/README.rst | 32 +++---------------- .../snippets/cloud-client/v1beta2/README.rst | 32 +++---------------- language/snippets/tutorial/README.rst | 32 +++---------------- 4 files changed, 20 insertions(+), 108 deletions(-) diff --git a/language/snippets/api/README.rst b/language/snippets/api/README.rst index f757fea84396..c975769aaba1 100644 --- a/language/snippets/api/README.rst +++ b/language/snippets/api/README.rst @@ -17,34 +17,12 @@ Setup Authentication ++++++++++++++ -Authentication is typically done through `Application Default Credentials`_, -which means you do not have to change the code to authenticate as long as -your environment has credentials. You have a few options for setting up -authentication: +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. -#. When running locally, use the `Google Cloud SDK`_ - - .. code-block:: bash - - gcloud auth application-default login - - -#. When running on App Engine or Compute Engine, credentials are already - set-up. However, you may need to configure your Compute Engine instance - with `additional scopes`_. - -#. You can create a `Service Account key file`_. 
This file can be used to - authenticate to Google Cloud Platform services from any environment. To use - the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to - the path to the key file, for example: - - .. code-block:: bash - - export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json - -.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow -.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using -.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started Install Dependencies ++++++++++++++++++++ diff --git a/language/snippets/cloud-client/v1/README.rst b/language/snippets/cloud-client/v1/README.rst index a3ee4b7f641f..165add01cad2 100644 --- a/language/snippets/cloud-client/v1/README.rst +++ b/language/snippets/cloud-client/v1/README.rst @@ -21,34 +21,12 @@ Setup Authentication ++++++++++++++ -Authentication is typically done through `Application Default Credentials`_, -which means you do not have to change the code to authenticate as long as -your environment has credentials. You have a few options for setting up -authentication: +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. -#. When running locally, use the `Google Cloud SDK`_ - - .. code-block:: bash - - gcloud auth application-default login - - -#. When running on App Engine or Compute Engine, credentials are already - set-up. However, you may need to configure your Compute Engine instance - with `additional scopes`_. - -#. You can create a `Service Account key file`_. This file can be used to - authenticate to Google Cloud Platform services from any environment. To use - the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to - the path to the key file, for example: - - .. code-block:: bash - - export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json - -.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow -.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using -.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started Install Dependencies ++++++++++++++++++++ diff --git a/language/snippets/cloud-client/v1beta2/README.rst b/language/snippets/cloud-client/v1beta2/README.rst index 77df4ffbec94..8640369e47d0 100644 --- a/language/snippets/cloud-client/v1beta2/README.rst +++ b/language/snippets/cloud-client/v1beta2/README.rst @@ -21,34 +21,12 @@ Setup Authentication ++++++++++++++ -Authentication is typically done through `Application Default Credentials`_, -which means you do not have to change the code to authenticate as long as -your environment has credentials. You have a few options for setting up -authentication: +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. -#. When running locally, use the `Google Cloud SDK`_ - - .. code-block:: bash - - gcloud auth application-default login - - -#. 
When running on App Engine or Compute Engine, credentials are already - set-up. However, you may need to configure your Compute Engine instance - with `additional scopes`_. - -#. You can create a `Service Account key file`_. This file can be used to - authenticate to Google Cloud Platform services from any environment. To use - the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to - the path to the key file, for example: - - .. code-block:: bash - - export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json - -.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow -.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using -.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started Install Dependencies ++++++++++++++++++++ diff --git a/language/snippets/tutorial/README.rst b/language/snippets/tutorial/README.rst index 5b862ead5c6a..f1ea6ad97bbf 100644 --- a/language/snippets/tutorial/README.rst +++ b/language/snippets/tutorial/README.rst @@ -17,34 +17,12 @@ Setup Authentication ++++++++++++++ -Authentication is typically done through `Application Default Credentials`_, -which means you do not have to change the code to authenticate as long as -your environment has credentials. You have a few options for setting up -authentication: +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. -#. When running locally, use the `Google Cloud SDK`_ - - .. code-block:: bash - - gcloud auth application-default login - - -#. When running on App Engine or Compute Engine, credentials are already - set-up. However, you may need to configure your Compute Engine instance - with `additional scopes`_. - -#. You can create a `Service Account key file`_. This file can be used to - authenticate to Google Cloud Platform services from any environment. To use - the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to - the path to the key file, for example: - - .. code-block:: bash - - export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json - -.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow -.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using -.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount +.. 
_Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started Install Dependencies ++++++++++++++++++++ From 3114217e43bb9aee51202747f97e162413c07237 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Tue, 19 Sep 2017 09:30:32 -0700 Subject: [PATCH 090/323] Update readme [(#1124)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1124) * update readme * keep entity sentiment in v1beta2 as well * update readme --- language/snippets/cloud-client/v1/README.rst | 9 ++- .../snippets/cloud-client/v1beta2/README.rst | 7 +- .../snippets/cloud-client/v1beta2/snippets.py | 79 ++++++++++++++++++- .../cloud-client/v1beta2/snippets_test.py | 20 +++++ 4 files changed, 109 insertions(+), 6 deletions(-) diff --git a/language/snippets/cloud-client/v1/README.rst b/language/snippets/cloud-client/v1/README.rst index 165add01cad2..8640369e47d0 100644 --- a/language/snippets/cloud-client/v1/README.rst +++ b/language/snippets/cloud-client/v1/README.rst @@ -76,7 +76,7 @@ To run this sample: $ python snippets.py usage: snippets.py [-h] - {sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} ... This application demonstrates how to perform basic operations with the @@ -86,7 +86,12 @@ To run this sample: https://cloud.google.com/natural-language/docs. positional arguments: - {sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + sentiment-entities-text + Detects entity sentiment in the provided text. + sentiment-entities-file + Detects entity sentiment in a Google Cloud Storage + file. sentiment-text Detects sentiment in the text. sentiment-file Detects sentiment in the file located in Google Cloud Storage. diff --git a/language/snippets/cloud-client/v1beta2/README.rst b/language/snippets/cloud-client/v1beta2/README.rst index 8640369e47d0..dc3b85c4be5a 100644 --- a/language/snippets/cloud-client/v1beta2/README.rst +++ b/language/snippets/cloud-client/v1beta2/README.rst @@ -76,7 +76,7 @@ To run this sample: $ python snippets.py usage: snippets.py [-h] - {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} ... This application demonstrates how to perform basic operations with the @@ -86,7 +86,10 @@ To run this sample: https://cloud.google.com/natural-language/docs. positional arguments: - {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + classify-text Classifies content categories of the provided text. + classify-file Classifies content categories of the text in a Google + Cloud Storage file. sentiment-entities-text Detects entity sentiment in the provided text. 
sentiment-entities-file diff --git a/language/snippets/cloud-client/v1beta2/snippets.py b/language/snippets/cloud-client/v1beta2/snippets.py index 3ccc2933cd7f..abf16ada560d 100644 --- a/language/snippets/cloud-client/v1beta2/snippets.py +++ b/language/snippets/cloud-client/v1beta2/snippets.py @@ -22,6 +22,7 @@ """ import argparse +import sys # [START beta_import] from google.cloud import language_v1beta2 @@ -125,6 +126,66 @@ def entities_file(gcs_uri): entity.metadata.get('wikipedia_url', '-'))) +# [START def_entity_sentiment_text] +def entity_sentiment_text(text): + """Detects entity sentiment in the provided text.""" + client = language_v1beta2.LanguageServiceClient() + + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + + document = types.Document( + content=text.encode('utf-8'), + type=enums.Document.Type.PLAIN_TEXT) + + # Detect and send native Python encoding to receive correct word offsets. + encoding = enums.EncodingType.UTF32 + if sys.maxunicode == 65535: + encoding = enums.EncodingType.UTF16 + + result = client.analyze_entity_sentiment(document, encoding) + + for entity in result.entities: + print('Mentions: ') + print(u'Name: "{}"'.format(entity.name)) + for mention in entity.mentions: + print(u' Begin Offset : {}'.format(mention.text.begin_offset)) + print(u' Content : {}'.format(mention.text.content)) + print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(u' Sentiment : {}'.format(mention.sentiment.score)) + print(u' Type : {}'.format(mention.type)) + print(u'Salience: {}'.format(entity.salience)) + print(u'Sentiment: {}\n'.format(entity.sentiment)) +# [END def_entity_sentiment_text] + + +def entity_sentiment_file(gcs_uri): + """Detects entity sentiment in a Google Cloud Storage file.""" + client = language_v1beta2.LanguageServiceClient() + + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) + + # Detect and send native Python encoding to receive correct word offsets. + encoding = enums.EncodingType.UTF32 + if sys.maxunicode == 65535: + encoding = enums.EncodingType.UTF16 + + result = client.analyze_entity_sentiment(document, encoding) + + for entity in result.entities: + print(u'Name: "{}"'.format(entity.name)) + for mention in entity.mentions: + print(u' Begin Offset : {}'.format(mention.text.begin_offset)) + print(u' Content : {}'.format(mention.text.content)) + print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(u' Sentiment : {}'.format(mention.sentiment.score)) + print(u' Type : {}'.format(mention.type)) + print(u'Salience: {}'.format(entity.salience)) + print(u'Sentiment: {}\n'.format(entity.sentiment)) + + def syntax_text(text): """Detects syntax in the text.""" client = language_v1beta2.LanguageServiceClient() @@ -174,7 +235,7 @@ def syntax_file(gcs_uri): # [START def_classify_text] def classify_text(text): - """Classifies the provided text.""" + """Classifies content categories of the provided text.""" # [START beta_client] client = language_v1beta2.LanguageServiceClient() # [END beta_client] @@ -197,7 +258,9 @@ def classify_text(text): # [START def_classify_file] def classify_file(gcs_uri): - """Classifies the text in a Google Cloud Storage file.""" + """Classifies content categories of the text in a Google Cloud Storage + file. 
+ """ client = language_v1beta2.LanguageServiceClient() document = types.Document( @@ -227,6 +290,14 @@ def classify_file(gcs_uri): 'classify-file', help=classify_file.__doc__) classify_text_parser.add_argument('gcs_uri') + sentiment_entities_text_parser = subparsers.add_parser( + 'sentiment-entities-text', help=entity_sentiment_text.__doc__) + sentiment_entities_text_parser.add_argument('text') + + sentiment_entities_file_parser = subparsers.add_parser( + 'sentiment-entities-file', help=entity_sentiment_file.__doc__) + sentiment_entities_file_parser.add_argument('gcs_uri') + sentiment_text_parser = subparsers.add_parser( 'sentiment-text', help=sentiment_text.__doc__) sentiment_text_parser.add_argument('text') @@ -265,6 +336,10 @@ def classify_file(gcs_uri): syntax_text(args.text) elif args.command == 'syntax-file': syntax_file(args.gcs_uri) + elif args.command == 'sentiment-entities-text': + entity_sentiment_text(args.text) + elif args.command == 'sentiment-entities-file': + entity_sentiment_file(args.gcs_uri) elif args.command == 'classify-text': classify_text(args.text) elif args.command == 'classify-file': diff --git a/language/snippets/cloud-client/v1beta2/snippets_test.py b/language/snippets/cloud-client/v1beta2/snippets_test.py index d440136b5010..5924ffb49176 100644 --- a/language/snippets/cloud-client/v1beta2/snippets_test.py +++ b/language/snippets/cloud-client/v1beta2/snippets_test.py @@ -69,6 +69,26 @@ def test_syntax_file(capsys): assert 'NOUN: President' in out +def test_sentiment_entities_text(capsys): + snippets.entity_sentiment_text( + 'President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'Content : White House' in out + + +def test_sentiment_entities_file(capsys): + snippets.entity_sentiment_file(TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'Content : White House' in out + + +def test_sentiment_entities_utf(capsys): + snippets.entity_sentiment_text( + 'foo→bar') + out, _ = capsys.readouterr() + assert 'Begin Offset : 4' in out + + def test_classify_text(capsys): snippets.classify_text( 'Android is a mobile operating system developed by Google, ' From 5a642616f57868f14ba28983b19555c0bb295953 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Tue, 19 Sep 2017 09:33:15 -0700 Subject: [PATCH 091/323] Classify tutorial [(#1120)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1120) * first version of classify_text tutorial * addressing comments * classify text tutorial * update client version * year first written * use auto generated README * add README.rst.in and README.rst * addressing review comments * add tests for index and query * import order * add numpy to requirements --- language/snippets/classify_text/README.rst | 126 +++++++++ language/snippets/classify_text/README.rst.in | 26 ++ .../classify_text/classify_text_tutorial.py | 261 ++++++++++++++++++ .../classify_text_tutorial_test.py | 90 ++++++ .../snippets/classify_text/requirements.txt | 2 + .../classify_text/resources/query_text1.txt | 1 + .../classify_text/resources/query_text2.txt | 1 + .../classify_text/resources/query_text3.txt | 1 + .../classify_text/resources/texts/android.txt | 1 + .../resources/texts/cat_in_the_hat.txt | 1 + .../resources/texts/cloud_computing.txt | 1 + .../classify_text/resources/texts/eclipse.txt | 1 + .../resources/texts/eclipse_of_the_sun.txt | 1 + .../classify_text/resources/texts/email.txt | 1 + .../classify_text/resources/texts/gcp.txt | 1 + .../classify_text/resources/texts/gmail.txt | 1 + 
.../classify_text/resources/texts/google.txt | 1 + .../resources/texts/harry_potter.txt | 1 + .../classify_text/resources/texts/matilda.txt | 1 + .../resources/texts/mobile_phone.txt | 1 + .../classify_text/resources/texts/mr_fox.txt | 1 + .../resources/texts/wireless.txt | 1 + 22 files changed, 522 insertions(+) create mode 100644 language/snippets/classify_text/README.rst create mode 100644 language/snippets/classify_text/README.rst.in create mode 100644 language/snippets/classify_text/classify_text_tutorial.py create mode 100644 language/snippets/classify_text/classify_text_tutorial_test.py create mode 100644 language/snippets/classify_text/requirements.txt create mode 100644 language/snippets/classify_text/resources/query_text1.txt create mode 100644 language/snippets/classify_text/resources/query_text2.txt create mode 100644 language/snippets/classify_text/resources/query_text3.txt create mode 100644 language/snippets/classify_text/resources/texts/android.txt create mode 100644 language/snippets/classify_text/resources/texts/cat_in_the_hat.txt create mode 100644 language/snippets/classify_text/resources/texts/cloud_computing.txt create mode 100644 language/snippets/classify_text/resources/texts/eclipse.txt create mode 100644 language/snippets/classify_text/resources/texts/eclipse_of_the_sun.txt create mode 100644 language/snippets/classify_text/resources/texts/email.txt create mode 100644 language/snippets/classify_text/resources/texts/gcp.txt create mode 100644 language/snippets/classify_text/resources/texts/gmail.txt create mode 100644 language/snippets/classify_text/resources/texts/google.txt create mode 100644 language/snippets/classify_text/resources/texts/harry_potter.txt create mode 100644 language/snippets/classify_text/resources/texts/matilda.txt create mode 100644 language/snippets/classify_text/resources/texts/mobile_phone.txt create mode 100644 language/snippets/classify_text/resources/texts/mr_fox.txt create mode 100644 language/snippets/classify_text/resources/texts/wireless.txt diff --git a/language/snippets/classify_text/README.rst b/language/snippets/classify_text/README.rst new file mode 100644 index 000000000000..0a61591bc22e --- /dev/null +++ b/language/snippets/classify_text/README.rst @@ -0,0 +1,126 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Natural Language API Python Samples +=============================================================================== + +This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers. + +This tutorial demostrates how to use the `classify_text` method to classify content category of text files, and use the result to compare texts by their similarity to each other. See the `tutorial page`_ for details about this sample. + +.. _tutorial page: https://cloud.google.com/natural-language/docs/classify-text-tutorial + + + + +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +Authentication is typically done through `Application Default Credentials`_, +which means you do not have to change the code to authenticate as long as +your environment has credentials. You have a few options for setting up +authentication: + +#. When running locally, use the `Google Cloud SDK`_ + + .. 
code-block:: bash + + gcloud auth application-default login + + +#. When running on App Engine or Compute Engine, credentials are already + set-up. However, you may need to configure your Compute Engine instance + with `additional scopes`_. + +#. You can create a `Service Account key file`_. This file can be used to + authenticate to Google Cloud Platform services from any environment. To use + the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to + the path to the key file, for example: + + .. code-block:: bash + + export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json + +.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow +.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using +.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount + +Install Dependencies +++++++++++++++++++++ + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Classify Text Tutorial ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python classify_text_tutorial.py + + usage: classify_text_tutorial.py [-h] + {classify,index,query,query-category} ... + + Using the classify_text method to cluster texts. + + positional arguments: + {classify,index,query,query-category} + classify Classify the input text into categories. + index Classify each text file in a directory and write the + results to the index_file. + query Find the indexed files that are the most similar to + the query text. + query-category Find the indexed files that are the most similar to + the query label. The list of all available labels: + https://cloud.google.com/natural- + language/docs/categories + + optional arguments: + -h, --help show this help message and exit + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. 
_Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/language/snippets/classify_text/README.rst.in b/language/snippets/classify_text/README.rst.in new file mode 100644 index 000000000000..42e8f061a5d7 --- /dev/null +++ b/language/snippets/classify_text/README.rst.in @@ -0,0 +1,26 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Natural Language API + short_name: Cloud Natural Language API + url: https://cloud.google.com/natural-language/docs/ + description: > + The `Google Cloud Natural Language API`_ provides natural language + understanding technologies to developers. + + + This tutorial demostrates how to use the `classify_text` method to classify content category of text files, and use the result to compare texts by their similarity to each other. See the `tutorial page`_ for details about this sample. + + + .. _tutorial page: https://cloud.google.com/natural-language/docs/classify-text-tutorial + +setup: +- auth +- install_deps + +samples: +- name: Classify Text Tutorial + file: classify_text_tutorial.py + show_help: true + +cloud_client_library: true diff --git a/language/snippets/classify_text/classify_text_tutorial.py b/language/snippets/classify_text/classify_text_tutorial.py new file mode 100644 index 000000000000..08a03e98212e --- /dev/null +++ b/language/snippets/classify_text/classify_text_tutorial.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python + +# Copyright 2017, Google, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START classify_text_tutorial] +"""Using the classify_text method to find content categories of text files, +Then use the content category labels to compare text similarity. + +For more information, see the tutorial page at +https://cloud.google.com/natural-language/docs/classify-text-tutorial. +""" + +# [START classify_text_tutorial_import] +import argparse +import io +import json +import os + +from google.cloud import language_v1beta2 +from google.cloud.language_v1beta2 import enums +from google.cloud.language_v1beta2 import types + +import numpy +import six +# [END classify_text_tutorial_import] + + +# [START def_classify] +def classify(text, verbose=True): + """Classify the input text into categories. """ + + language_client = language_v1beta2.LanguageServiceClient() + + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) + response = language_client.classify_text(document) + categories = response.categories + + result = {} + + for category in categories: + # Turn the categories into a dictionary of the form: + # {category.name: category.confidence}, so that they can + # be treated as a sparse vector. 
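+        # An illustrative (not real) response for a technology article
+        # could map to {'/Computers & Electronics/Software': 0.6}.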
+ result[category.name] = category.confidence + + if verbose: + print(text) + for category in categories: + print(u'=' * 20) + print(u'{:<16}: {}'.format('category', category.name)) + print(u'{:<16}: {}'.format('confidence', category.confidence)) + + return result +# [END def_classify] + + +# [START def_index] +def index(path, index_file): + """Classify each text file in a directory and write + the results to the index_file. + """ + + result = {} + for filename in os.listdir(path): + file_path = os.path.join(path, filename) + + if not os.path.isfile(file_path): + continue + + try: + with io.open(file_path, 'r') as f: + text = f.read() + categories = classify(text, verbose=False) + + result[filename] = categories + except: + print('Failed to process {}'.format(file_path)) + + with io.open(index_file, 'w') as f: + f.write(unicode(json.dumps(result))) + + print('Texts indexed in file: {}'.format(index_file)) + return result +# [END def_index] + + +# [START def_split_labels] +def split_labels(categories): + """The category labels are of the form "/a/b/c" up to three levels, + for example "/Computers & Electronics/Software", and these labels + are used as keys in the categories dictionary, whose values are + confidence scores. + + The split_labels function splits the keys into individual levels + while duplicating the confidence score, which allows a natural + boost in how we calculate similarity when more levels are in common. + + Example: + If we have + + x = {"/a/b/c": 0.5} + y = {"/a/b": 0.5} + z = {"/a": 0.5} + + Then x and y are considered more similar than y and z. + """ + _categories = {} + for name, confidence in six.iteritems(categories): + labels = [label for label in name.split('/') if label] + for label in labels: + _categories[label] = confidence + + return _categories +# [END def_split_labels] + + +# [START def_similarity] +def similarity(categories1, categories2): + """Cosine similarity of the categories treated as sparse vectors.""" + categories1 = split_labels(categories1) + categories2 = split_labels(categories2) + + norm1 = numpy.linalg.norm(categories1.values()) + norm2 = numpy.linalg.norm(categories2.values()) + + # Return the smallest possible similarity if either categories is empty. + if norm1 == 0 or norm2 == 0: + return 0.0 + + # Compute the cosine similarity. + dot = 0.0 + for label, confidence in six.iteritems(categories1): + dot += confidence * categories2.get(label, 0.0) + + return dot / (norm1 * norm2) +# [END def_similarity] + + +# [START def_query] +def query(index_file, text, n_top=3): + """Find the indexed files that are the most similar to + the query text. + """ + + with io.open(index_file, 'r') as f: + index = json.load(f) + + # Get the categories of the query text. 
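+    # Each indexed file's categories are then scored against the query's
+    # categories with cosine similarity, and the files are ranked from
+    # most to least similar.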
+ query_categories = classify(text, verbose=False) + + similarities = [] + for filename, categories in six.iteritems(index): + similarities.append( + (filename, similarity(query_categories, categories))) + + similarities = sorted(similarities, key=lambda p: p[1], reverse=True) + + print('=' * 20) + print('Query: {}\n'.format(text)) + for category, confidence in six.iteritems(query_categories): + print('\tCategory: {}, confidence: {}'.format(category, confidence)) + print('\nMost similar {} indexed texts:'.format(n_top)) + for filename, sim in similarities[:n_top]: + print('\tFilename: {}'.format(filename)) + print('\tSimilarity: {}'.format(sim)) + print('\n') + + return similarities +# [END def_query] + + +# [START def_query_category] +def query_category(index_file, category_string, n_top=3): + """Find the indexed files that are the most similar to + the query label. + + The list of all available labels: + https://cloud.google.com/natural-language/docs/categories + """ + + with io.open(index_file, 'r') as f: + index = json.load(f) + + # Make the category_string into a dictionary so that it is + # of the same format as what we get by calling classify. + query_categories = {category_string: 1.0} + + similarities = [] + for filename, categories in six.iteritems(index): + similarities.append( + (filename, similarity(query_categories, categories))) + + similarities = sorted(similarities, key=lambda p: p[1], reverse=True) + + print('=' * 20) + print('Query: {}\n'.format(category_string)) + print('\nMost similar {} indexed texts:'.format(n_top)) + for filename, sim in similarities[:n_top]: + print('\tFilename: {}'.format(filename)) + print('\tSimilarity: {}'.format(sim)) + print('\n') + + return similarities +# [END def_query_category] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + classify_parser = subparsers.add_parser( + 'classify', help=classify.__doc__) + classify_parser.add_argument( + 'text', help='The text to be classified. 
' + 'The text needs to have at least 20 tokens.') + index_parser = subparsers.add_parser( + 'index', help=index.__doc__) + index_parser.add_argument( + 'path', help='The directory that contains ' + 'text files to be indexed.') + index_parser.add_argument( + '--index_file', help='Filename for the output JSON.', + default='index.json') + query_parser = subparsers.add_parser( + 'query', help=query.__doc__) + query_parser.add_argument( + 'index_file', help='Path to the index JSON file.') + query_parser.add_argument( + 'text', help='Query text.') + query_category_parser = subparsers.add_parser( + 'query-category', help=query_category.__doc__) + query_category_parser.add_argument( + 'index_file', help='Path to the index JSON file.') + query_category_parser.add_argument( + 'category', help='Query category.') + + args = parser.parse_args() + + if args.command == 'classify': + classify(args.text) + if args.command == 'index': + index(args.path, args.index_file) + if args.command == 'query': + query(args.index_file, args.text) + if args.command == 'query-category': + query_category(args.index_file, args.category) +# [END classify_text_tutorial] diff --git a/language/snippets/classify_text/classify_text_tutorial_test.py b/language/snippets/classify_text/classify_text_tutorial_test.py new file mode 100644 index 000000000000..305cf53fede5 --- /dev/null +++ b/language/snippets/classify_text/classify_text_tutorial_test.py @@ -0,0 +1,90 @@ +# Copyright 2016, Google, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import classify_text_tutorial +import pytest + + +OUTPUT = 'index.json' +RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +QUERY_TEXT = """Google Home enables users to speak voice commands to interact +with services through the Home\'s intelligent personal assistant called +Google Assistant. 
A large number of services, both in-house and third-party, +are integrated, allowing users to listen to music, look at videos or photos, +or receive news updates entirely by voice.""" +QUERY_CATEGORY = '/Computers & Electronics/Software' + + +@pytest.fixture(scope='session') +def index_file(tmpdir_factory): + temp_file = tmpdir_factory.mktemp('tmp').join(OUTPUT) + temp_out = temp_file.strpath + classify_text_tutorial.index(os.path.join(RESOURCES, 'texts'), temp_out) + return temp_file + + +def test_classify(capsys): + with open(os.path.join(RESOURCES, 'query_text1.txt'), 'r') as f: + text = f.read() + classify_text_tutorial.classify(text) + out, err = capsys.readouterr() + assert 'category' in out + + +def test_index(capsys, tmpdir): + temp_dir = tmpdir.mkdir('tmp') + temp_out = temp_dir.join(OUTPUT).strpath + + classify_text_tutorial.index(os.path.join(RESOURCES, 'texts'), temp_out) + out, err = capsys.readouterr() + + assert OUTPUT in out + assert len(temp_dir.listdir()) == 1 + + +def test_query_text(capsys, index_file): + temp_out = index_file.strpath + + classify_text_tutorial.query(temp_out, QUERY_TEXT) + out, err = capsys.readouterr() + + assert 'Filename: cloud_computing.txt' in out + + +def test_query_category(capsys, index_file): + temp_out = index_file.strpath + + classify_text_tutorial.query_category(temp_out, QUERY_CATEGORY) + out, err = capsys.readouterr() + + assert 'Filename: cloud_computing.txt' in out + + +def test_split_labels(): + categories = {'/a/b/c': 1.0} + split_categories = {'a': 1.0, 'b': 1.0, 'c': 1.0} + assert classify_text_tutorial.split_labels(categories) == split_categories + + +def test_similarity(): + empty_categories = {} + categories1 = {'/a/b/c': 1.0, '/d/e': 1.0} + categories2 = {'/a/b': 1.0} + + assert classify_text_tutorial.similarity( + empty_categories, categories1) == 0.0 + assert classify_text_tutorial.similarity(categories1, categories1) > 0.99 + assert classify_text_tutorial.similarity(categories1, categories2) > 0 + assert classify_text_tutorial.similarity(categories1, categories2) < 1 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt new file mode 100644 index 000000000000..10069f1801ef --- /dev/null +++ b/language/snippets/classify_text/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-language==0.29.0 +numpy==1.13.1 diff --git a/language/snippets/classify_text/resources/query_text1.txt b/language/snippets/classify_text/resources/query_text1.txt new file mode 100644 index 000000000000..304727304d1d --- /dev/null +++ b/language/snippets/classify_text/resources/query_text1.txt @@ -0,0 +1 @@ +Google Home enables users to speak voice commands to interact with services through the Home's intelligent personal assistant called Google Assistant. A large number of services, both in-house and third-party, are integrated, allowing users to listen to music, look at videos or photos, or receive news updates entirely by voice. diff --git a/language/snippets/classify_text/resources/query_text2.txt b/language/snippets/classify_text/resources/query_text2.txt new file mode 100644 index 000000000000..eef573c60077 --- /dev/null +++ b/language/snippets/classify_text/resources/query_text2.txt @@ -0,0 +1 @@ +The Hitchhiker's Guide to the Galaxy is the first of five books in the Hitchhiker's Guide to the Galaxy comedy science fiction "trilogy" by Douglas Adams (with the sixth written by Eoin Colfer). 
\ No newline at end of file diff --git a/language/snippets/classify_text/resources/query_text3.txt b/language/snippets/classify_text/resources/query_text3.txt new file mode 100644 index 000000000000..1337d3c64770 --- /dev/null +++ b/language/snippets/classify_text/resources/query_text3.txt @@ -0,0 +1 @@ +Goodnight Moon is an American children's picture book written by Margaret Wise Brown and illustrated by Clement Hurd. It was published on September 3, 1947, and is a highly acclaimed example of a bedtime story. \ No newline at end of file diff --git a/language/snippets/classify_text/resources/texts/android.txt b/language/snippets/classify_text/resources/texts/android.txt new file mode 100644 index 000000000000..29dc1449c55c --- /dev/null +++ b/language/snippets/classify_text/resources/texts/android.txt @@ -0,0 +1 @@ +Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets. diff --git a/language/snippets/classify_text/resources/texts/cat_in_the_hat.txt b/language/snippets/classify_text/resources/texts/cat_in_the_hat.txt new file mode 100644 index 000000000000..bb5a853c694d --- /dev/null +++ b/language/snippets/classify_text/resources/texts/cat_in_the_hat.txt @@ -0,0 +1 @@ +The Cat in the Hat is a children's book written and illustrated by Theodor Geisel under the pen name Dr. Seuss and first published in 1957. The story centers on a tall anthropomorphic cat, who wears a red and white-striped hat and a red bow tie. \ No newline at end of file diff --git a/language/snippets/classify_text/resources/texts/cloud_computing.txt b/language/snippets/classify_text/resources/texts/cloud_computing.txt new file mode 100644 index 000000000000..88172adf1f46 --- /dev/null +++ b/language/snippets/classify_text/resources/texts/cloud_computing.txt @@ -0,0 +1 @@ +Cloud computing is a computing-infrastructure and software model for enabling ubiquitous access to shared pools of configurable resources (such as computer networks, servers, storage, applications and services), which can be rapidly provisioned with minimal management effort, often over the Internet. \ No newline at end of file diff --git a/language/snippets/classify_text/resources/texts/eclipse.txt b/language/snippets/classify_text/resources/texts/eclipse.txt new file mode 100644 index 000000000000..5d16217e520a --- /dev/null +++ b/language/snippets/classify_text/resources/texts/eclipse.txt @@ -0,0 +1 @@ +A solar eclipse (as seen from the planet Earth) is a type of eclipse that occurs when the Moon passes between the Sun and Earth, and when the Moon fully or partially blocks (occults) the Sun. diff --git a/language/snippets/classify_text/resources/texts/eclipse_of_the_sun.txt b/language/snippets/classify_text/resources/texts/eclipse_of_the_sun.txt new file mode 100644 index 000000000000..7236fc9d806a --- /dev/null +++ b/language/snippets/classify_text/resources/texts/eclipse_of_the_sun.txt @@ -0,0 +1 @@ +Eclipse of the Sun is the debut novel by English author Phil Whitaker. It won the 1997 John Llewellyn Rhys Prize a Betty Trask Award in 1998, and was shortlisted for the 1997 Whitbread First Novel Award. 
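The split_labels/similarity pair added by this tutorial is its mathematical core: each "/a/b/c" category path is exploded into one key per level, all sharing that category's confidence score, and two documents are then compared by the cosine similarity of the resulting sparse vectors. Below is a minimal, self-contained sketch of that logic, using the assumed 0.5-confidence dictionaries from the split_labels docstring and the list()-wrapped norms that a later commit in this series (PATCH 098) applies for Python 3 compatibility:

    import numpy


    def split_labels(categories):
        """Explode '/a/b/c' keys into one key per level, each keeping
        the original confidence score."""
        result = {}
        for name, confidence in categories.items():
            for label in name.split('/'):
                if label:
                    result[label] = confidence
        return result


    def similarity(categories1, categories2):
        """Cosine similarity of two category dicts treated as sparse vectors."""
        c1 = split_labels(categories1)
        c2 = split_labels(categories2)
        norm1 = numpy.linalg.norm(list(c1.values()))
        norm2 = numpy.linalg.norm(list(c2.values()))
        # The smallest possible similarity if either dict is empty.
        if norm1 == 0 or norm2 == 0:
            return 0.0
        dot = sum(conf * c2.get(label, 0.0) for label, conf in c1.items())
        return dot / (norm1 * norm2)


    x = {'/a/b/c': 0.5}
    y = {'/a/b': 0.5}
    z = {'/a': 0.5}
    print(similarity(x, y))  # ~0.816 (two of three levels shared)
    print(similarity(y, z))  # ~0.707 (one of two levels shared)
    print(similarity(x, z))  # ~0.577 (one of three levels shared)

Duplicating one confidence score across every path level is what boosts similarity as more levels are shared: x and y above have two of three levels in common and score higher than y and z, which share only one.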
diff --git a/language/snippets/classify_text/resources/texts/email.txt b/language/snippets/classify_text/resources/texts/email.txt new file mode 100644 index 000000000000..3d430527b755 --- /dev/null +++ b/language/snippets/classify_text/resources/texts/email.txt @@ -0,0 +1 @@ +Electronic mail (email or e-mail) is a method of exchanging messages between people using electronics. Email first entered substantial use in the 1960s and by the mid-1970s had taken the form now recognized as email. \ No newline at end of file diff --git a/language/snippets/classify_text/resources/texts/gcp.txt b/language/snippets/classify_text/resources/texts/gcp.txt new file mode 100644 index 000000000000..1ed09b2c758a --- /dev/null +++ b/language/snippets/classify_text/resources/texts/gcp.txt @@ -0,0 +1 @@ +Google Cloud Platform, offered by Google, is a suite of cloud computing services that runs on the same infrastructure that Google uses internally for its end-user products, such as Google Search and YouTube. Alongside a set of management tools, it provides a series of modular cloud services including computing, data storage, data analytics and machine learning. diff --git a/language/snippets/classify_text/resources/texts/gmail.txt b/language/snippets/classify_text/resources/texts/gmail.txt new file mode 100644 index 000000000000..89c9704b117c --- /dev/null +++ b/language/snippets/classify_text/resources/texts/gmail.txt @@ -0,0 +1 @@ +Gmail is a free, advertising-supported email service developed by Google. Users can access Gmail on the web and through mobile apps for Android and iOS, as well as through third-party programs that synchronize email content through POP or IMAP protocols. \ No newline at end of file diff --git a/language/snippets/classify_text/resources/texts/google.txt b/language/snippets/classify_text/resources/texts/google.txt new file mode 100644 index 000000000000..06828635931e --- /dev/null +++ b/language/snippets/classify_text/resources/texts/google.txt @@ -0,0 +1 @@ +Google is an American multinational technology company that specializes in Internet-related services and products. These include online advertising technologies, search, cloud computing, software, and hardware. diff --git a/language/snippets/classify_text/resources/texts/harry_potter.txt b/language/snippets/classify_text/resources/texts/harry_potter.txt new file mode 100644 index 000000000000..339c10af05a2 --- /dev/null +++ b/language/snippets/classify_text/resources/texts/harry_potter.txt @@ -0,0 +1 @@ +Harry Potter is a series of fantasy novels written by British author J. K. Rowling. The novels chronicle the life of a young wizard, Harry Potter, and his friends Hermione Granger and Ron Weasley, all of whom are students at Hogwarts School of Witchcraft and Wizardry. \ No newline at end of file diff --git a/language/snippets/classify_text/resources/texts/matilda.txt b/language/snippets/classify_text/resources/texts/matilda.txt new file mode 100644 index 000000000000..e1539d7ee88d --- /dev/null +++ b/language/snippets/classify_text/resources/texts/matilda.txt @@ -0,0 +1 @@ +Matilda is a book by British writer Roald Dahl. Matilda won the Children's Book Award in 1999. It was published in 1988 by Jonathan Cape in London, with 232 pages and illustrations by Quentin Blake. 
\ No newline at end of file diff --git a/language/snippets/classify_text/resources/texts/mobile_phone.txt b/language/snippets/classify_text/resources/texts/mobile_phone.txt new file mode 100644 index 000000000000..725e22ef3a91 --- /dev/null +++ b/language/snippets/classify_text/resources/texts/mobile_phone.txt @@ -0,0 +1 @@ +A mobile phone is a portable device that can make and receive calls over a radio frequency link while the user is moving within a telephone service area. The radio frequency link establishes a connection to the switching systems of a mobile phone operator, which provides access to the public switched telephone network (PSTN). \ No newline at end of file diff --git a/language/snippets/classify_text/resources/texts/mr_fox.txt b/language/snippets/classify_text/resources/texts/mr_fox.txt new file mode 100644 index 000000000000..354feced2af1 --- /dev/null +++ b/language/snippets/classify_text/resources/texts/mr_fox.txt @@ -0,0 +1 @@ +Fantastic Mr Fox is a children's novel written by British author Roald Dahl. It was published in 1970, by George Allen & Unwin in the UK and Alfred A. Knopf in the U.S., with illustrations by Donald Chaffin. \ No newline at end of file diff --git a/language/snippets/classify_text/resources/texts/wireless.txt b/language/snippets/classify_text/resources/texts/wireless.txt new file mode 100644 index 000000000000..d742331c4644 --- /dev/null +++ b/language/snippets/classify_text/resources/texts/wireless.txt @@ -0,0 +1 @@ +Wireless communication, or sometimes simply wireless, is the transfer of information or power between two or more points that are not connected by an electrical conductor. The most common wireless technologies use radio waves. \ No newline at end of file From f9361c4de4c36949e23323880be9870d31d2d0c2 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 21 Sep 2017 13:40:34 -0700 Subject: [PATCH 092/323] Auto-update dependencies. [(#1133)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1133) * Auto-update dependencies. 
* Fix missing http library Change-Id: I99faa600f2f3f1f50f57694fc9835d7f35bda250 --- language/snippets/api/requirements.txt | 4 +++- language/snippets/movie_nl/requirements.txt | 4 +++- language/snippets/ocr_nl/requirements.txt | 4 +++- language/snippets/syntax_triples/requirements.txt | 4 +++- language/snippets/tutorial/requirements.txt | 4 +++- 5 files changed, 15 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 28ef89127f4b..af5ec8147bf1 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1 +1,3 @@ -google-api-python-client==1.6.3 +google-api-python-client==1.6.4 +google-auth==1.1.1 +google-auth-httplib2==0.0.2 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 69ae9a1934bc..e9b67954aeaf 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,2 +1,4 @@ -google-api-python-client==1.6.3 +google-api-python-client==1.6.4 +google-auth==1.1.1 +google-auth-httplib2==0.0.2 requests==2.18.4 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index 28ef89127f4b..af5ec8147bf1 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1 +1,3 @@ -google-api-python-client==1.6.3 +google-api-python-client==1.6.4 +google-auth==1.1.1 +google-auth-httplib2==0.0.2 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index 28ef89127f4b..af5ec8147bf1 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1 +1,3 @@ -google-api-python-client==1.6.3 +google-api-python-client==1.6.4 +google-auth==1.1.1 +google-auth-httplib2==0.0.2 diff --git a/language/snippets/tutorial/requirements.txt b/language/snippets/tutorial/requirements.txt index 28ef89127f4b..af5ec8147bf1 100644 --- a/language/snippets/tutorial/requirements.txt +++ b/language/snippets/tutorial/requirements.txt @@ -1 +1,3 @@ -google-api-python-client==1.6.3 +google-api-python-client==1.6.4 +google-auth==1.1.1 +google-auth-httplib2==0.0.2 From 3820973e6294adc96314ef92b1ff6c639e6651ea Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 12 Oct 2017 10:16:11 -0700 Subject: [PATCH 093/323] Added Link to Python Setup Guide [(#1158)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1158) * Update Readme.rst to add Python setup guide As requested in b/64770713. This sample is linked in documentation https://cloud.google.com/bigtable/docs/scaling, and it would make more sense to update the guide here than in the documentation. * Update README.rst * Update README.rst * Update README.rst * Update README.rst * Update README.rst * Update install_deps.tmpl.rst * Updated readmegen scripts and re-generated related README files * Fixed the lint error --- language/snippets/api/README.rst | 5 ++++- language/snippets/cloud-client/v1/README.rst | 5 ++++- language/snippets/cloud-client/v1beta2/README.rst | 5 ++++- language/snippets/tutorial/README.rst | 5 ++++- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/language/snippets/api/README.rst b/language/snippets/api/README.rst index c975769aaba1..e97059a3919c 100644 --- a/language/snippets/api/README.rst +++ b/language/snippets/api/README.rst @@ -27,7 +27,10 @@ credentials for applications. 
Install Dependencies ++++++++++++++++++++ -#. Install `pip`_ and `virtualenv`_ if you do not already have them. +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. diff --git a/language/snippets/cloud-client/v1/README.rst b/language/snippets/cloud-client/v1/README.rst index 8640369e47d0..cf4c07d20b99 100644 --- a/language/snippets/cloud-client/v1/README.rst +++ b/language/snippets/cloud-client/v1/README.rst @@ -31,7 +31,10 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ -#. Install `pip`_ and `virtualenv`_ if you do not already have them. +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. diff --git a/language/snippets/cloud-client/v1beta2/README.rst b/language/snippets/cloud-client/v1beta2/README.rst index dc3b85c4be5a..f2ec309a64e0 100644 --- a/language/snippets/cloud-client/v1beta2/README.rst +++ b/language/snippets/cloud-client/v1beta2/README.rst @@ -31,7 +31,10 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ -#. Install `pip`_ and `virtualenv`_ if you do not already have them. +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. diff --git a/language/snippets/tutorial/README.rst b/language/snippets/tutorial/README.rst index f1ea6ad97bbf..651b219cad1d 100644 --- a/language/snippets/tutorial/README.rst +++ b/language/snippets/tutorial/README.rst @@ -27,7 +27,10 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ -#. Install `pip`_ and `virtualenv`_ if you do not already have them. +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. From 1e2c0915430841d46efdff8f4fbed7008daf8c23 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 23 Oct 2017 14:23:30 -0700 Subject: [PATCH 094/323] Auto-update dependencies. 
[(#1138)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1138) --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 10069f1801ef..05ff98e10fbd 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==0.29.0 -numpy==1.13.1 +numpy==1.13.3 From 6fd82d1ab426ed18b460ceea346da8048b91de01 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 24 Oct 2017 11:28:26 -0700 Subject: [PATCH 095/323] Fix classify text tutorial Change-Id: I7d133862f2e9305c978ec6fb4c8168640f3d08ed --- language/snippets/classify_text/classify_text_tutorial.py | 4 ++-- .../snippets/classify_text/classify_text_tutorial_test.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/language/snippets/classify_text/classify_text_tutorial.py b/language/snippets/classify_text/classify_text_tutorial.py index 08a03e98212e..b5358b6572f4 100644 --- a/language/snippets/classify_text/classify_text_tutorial.py +++ b/language/snippets/classify_text/classify_text_tutorial.py @@ -86,11 +86,11 @@ def index(path, index_file): categories = classify(text, verbose=False) result[filename] = categories - except: + except Exception: print('Failed to process {}'.format(file_path)) with io.open(index_file, 'w') as f: - f.write(unicode(json.dumps(result))) + f.write(json.dumps(result).encode('utf-8')) print('Texts indexed in file: {}'.format(index_file)) return result diff --git a/language/snippets/classify_text/classify_text_tutorial_test.py b/language/snippets/classify_text/classify_text_tutorial_test.py index 305cf53fede5..28de0562bdba 100644 --- a/language/snippets/classify_text/classify_text_tutorial_test.py +++ b/language/snippets/classify_text/classify_text_tutorial_test.py @@ -13,9 +13,10 @@ import os -import classify_text_tutorial import pytest +import classify_text_tutorial + OUTPUT = 'index.json' RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') From c049a09b887aa33c97d64fec00af96a0a84ad5a3 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 24 Oct 2017 12:07:18 -0700 Subject: [PATCH 096/323] Fix classify text tutorial Change-Id: Ib86df7cf37588b7a7fc0c7f4ad4fc70548152354 --- language/snippets/classify_text/classify_text_tutorial.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/language/snippets/classify_text/classify_text_tutorial.py b/language/snippets/classify_text/classify_text_tutorial.py index b5358b6572f4..1e1605e1ac87 100644 --- a/language/snippets/classify_text/classify_text_tutorial.py +++ b/language/snippets/classify_text/classify_text_tutorial.py @@ -89,8 +89,8 @@ def index(path, index_file): except Exception: print('Failed to process {}'.format(file_path)) - with io.open(index_file, 'w') as f: - f.write(json.dumps(result).encode('utf-8')) + with io.open(index_file, 'w', encoding='utf-8') as f: + f.write(json.dumps(result)) print('Texts indexed in file: {}'.format(index_file)) return result From 443305823424a435b54b450f895d00c74ad23d18 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 24 Oct 2017 12:14:35 -0700 Subject: [PATCH 097/323] Fix a few more lint issues Change-Id: I0d420f3053f391fa225e4b8179e45fd1138f5c65 --- language/snippets/movie_nl/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/movie_nl/main.py 
b/language/snippets/movie_nl/main.py index 73e624889298..06be1c9c6f32 100644 --- a/language/snippets/movie_nl/main.py +++ b/language/snippets/movie_nl/main.py @@ -128,7 +128,7 @@ def get_wiki_title(wiki_url): try: content = requests.get(wiki_url).text return content.split('title')[1].split('-')[0].split('>')[1].strip() - except: + except KeyError: return os.path.basename(wiki_url).replace('_', ' ') From 1a8adf15cd1a07b5aa8c161a06d246d0c44e992f Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Wed, 1 Nov 2017 12:29:37 -0700 Subject: [PATCH 098/323] Fixed failed tests on Kokoro (Natural Language API) [(#1185)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1185) * Add Snippet for Listing All Subscriptions in a Project * Fixed the failed tests on Kokoro classify_text_tutorial_test.py::test_query_text classify_text_tutorial_test.py::test_query_category --- language/snippets/classify_text/classify_text_tutorial.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/language/snippets/classify_text/classify_text_tutorial.py b/language/snippets/classify_text/classify_text_tutorial.py index 1e1605e1ac87..5d793f50ee48 100644 --- a/language/snippets/classify_text/classify_text_tutorial.py +++ b/language/snippets/classify_text/classify_text_tutorial.py @@ -133,8 +133,8 @@ def similarity(categories1, categories2): categories1 = split_labels(categories1) categories2 = split_labels(categories2) - norm1 = numpy.linalg.norm(categories1.values()) - norm2 = numpy.linalg.norm(categories2.values()) + norm1 = numpy.linalg.norm(list(categories1.values())) + norm2 = numpy.linalg.norm(list(categories2.values())) # Return the smallest possible similarity if either categories is empty. if norm1 == 0 or norm2 == 0: From fdefb9cec361b4efe352e6bf233acf7912b725c7 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 1 Nov 2017 12:30:10 -0700 Subject: [PATCH 099/323] Auto-update dependencies. 
[(#1186)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1186) --- language/snippets/api/requirements.txt | 2 +- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/cloud-client/v1beta2/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- language/snippets/ocr_nl/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 2 +- language/snippets/tutorial/requirements.txt | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index af5ec8147bf1..558e42c22ec8 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.1.1 +google-auth==1.2.0 google-auth-httplib2==0.0.2 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 05ff98e10fbd..b6929a06ccf3 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==0.29.0 +google-cloud-language==0.30.0 numpy==1.13.3 diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index afc8ed0adf2c..39d8fe989bbf 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.29.0 +google-cloud-language==0.30.0 diff --git a/language/snippets/cloud-client/v1beta2/requirements.txt b/language/snippets/cloud-client/v1beta2/requirements.txt index afc8ed0adf2c..39d8fe989bbf 100644 --- a/language/snippets/cloud-client/v1beta2/requirements.txt +++ b/language/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.29.0 +google-cloud-language==0.30.0 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index e9b67954aeaf..46c981d54a2a 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ google-api-python-client==1.6.4 -google-auth==1.1.1 +google-auth==1.2.0 google-auth-httplib2==0.0.2 requests==2.18.4 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index af5ec8147bf1..558e42c22ec8 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.1.1 +google-auth==1.2.0 google-auth-httplib2==0.0.2 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index afc8ed0adf2c..39d8fe989bbf 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.29.0 +google-cloud-language==0.30.0 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index af5ec8147bf1..558e42c22ec8 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.1.1 +google-auth==1.2.0 google-auth-httplib2==0.0.2 diff --git a/language/snippets/tutorial/requirements.txt 
b/language/snippets/tutorial/requirements.txt index af5ec8147bf1..558e42c22ec8 100644 --- a/language/snippets/tutorial/requirements.txt +++ b/language/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.1.1 +google-auth==1.2.0 google-auth-httplib2==0.0.2 From 0a0c3978bd5c2f7ece5a1f0532618b1e30bffea4 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 6 Nov 2017 10:44:14 -0800 Subject: [PATCH 100/323] Auto-update dependencies. [(#1199)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1199) * Auto-update dependencies. * Fix iot lint Change-Id: I6289e093bdb35e38f9e9bfc3fbc3df3660f9a67e --- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/cloud-client/v1beta2/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index b6929a06ccf3..a6cd6d6cc3bd 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==0.30.0 +google-cloud-language==0.31.0 numpy==1.13.3 diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 39d8fe989bbf..5a82efc96e7e 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.30.0 +google-cloud-language==0.31.0 diff --git a/language/snippets/cloud-client/v1beta2/requirements.txt b/language/snippets/cloud-client/v1beta2/requirements.txt index 39d8fe989bbf..5a82efc96e7e 100644 --- a/language/snippets/cloud-client/v1beta2/requirements.txt +++ b/language/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.30.0 +google-cloud-language==0.31.0 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 39d8fe989bbf..5a82efc96e7e 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.30.0 +google-cloud-language==0.31.0 From 67afd426f0fafb3a7ded4d54c7954fb8dfaf3381 Mon Sep 17 00:00:00 2001 From: Andrew Gorcester Date: Thu, 9 Nov 2017 09:28:41 -0800 Subject: [PATCH 101/323] Fix linter issue w/ snippets --- language/snippets/cloud-client/v1/snippets.py | 1 - 1 file changed, 1 deletion(-) diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py index e13fc7dd6c8b..2b754ace7ac3 100644 --- a/language/snippets/cloud-client/v1/snippets.py +++ b/language/snippets/cloud-client/v1/snippets.py @@ -27,7 +27,6 @@ from google.cloud import language from google.cloud.language import enums from google.cloud.language import types - import six From b072af54352470f3366e427b3a6d3649e55486dd Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 9 Nov 2017 14:45:13 -0800 Subject: [PATCH 102/323] Auto-update dependencies. 
[(#1208)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1208) --- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/cloud-client/v1beta2/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index a6cd6d6cc3bd..701a534230c7 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==0.31.0 +google-cloud-language==1.0.0 numpy==1.13.3 diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 5a82efc96e7e..b5848a34cec1 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.31.0 +google-cloud-language==1.0.0 diff --git a/language/snippets/cloud-client/v1beta2/requirements.txt b/language/snippets/cloud-client/v1beta2/requirements.txt index 5a82efc96e7e..b5848a34cec1 100644 --- a/language/snippets/cloud-client/v1beta2/requirements.txt +++ b/language/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.31.0 +google-cloud-language==1.0.0 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 5a82efc96e7e..b5848a34cec1 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.31.0 +google-cloud-language==1.0.0 From db96e3ae432e53a0fb2f485f4963219305ada08f Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Mon, 13 Nov 2017 09:21:46 -0800 Subject: [PATCH 103/323] Language v1 [(#1202)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1202) * copy classify_test samples and tests to v1 * flake * client library version --- language/snippets/cloud-client/v1/snippets.py | 53 +++++++++++++++++++ .../snippets/cloud-client/v1/snippets_test.py | 18 +++++++ 2 files changed, 71 insertions(+) diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py index 2b754ace7ac3..30b591a40371 100644 --- a/language/snippets/cloud-client/v1/snippets.py +++ b/language/snippets/cloud-client/v1/snippets.py @@ -253,12 +253,61 @@ def entity_sentiment_file(gcs_uri): print(u'Sentiment: {}\n'.format(entity.sentiment)) +# [START def_classify_text] +def classify_text(text): + """Classifies content categories of the provided text.""" + client = language.LanguageServiceClient() + + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + + document = types.Document( + content=text.encode('utf-8'), + type=enums.Document.Type.PLAIN_TEXT) + + categories = client.classify_text(document).categories + + for category in categories: + print(u'=' * 20) + print(u'{:<16}: {}'.format('name', category.name)) + print(u'{:<16}: {}'.format('confidence', category.confidence)) +# [END def_classify_text] + + +# [START def_classify_file] +def classify_file(gcs_uri): + """Classifies content categories of the text in a Google Cloud Storage + file. 
+ """ + client = language.LanguageServiceClient() + + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) + + categories = client.classify_text(document).categories + + for category in categories: + print(u'=' * 20) + print(u'{:<16}: {}'.format('name', category.name)) + print(u'{:<16}: {}'.format('confidence', category.confidence)) +# [END def_classify_file] + + if __name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) subparsers = parser.add_subparsers(dest='command') + classify_text_parser = subparsers.add_parser( + 'classify-text', help=classify_text.__doc__) + classify_text_parser.add_argument('text') + + classify_text_parser = subparsers.add_parser( + 'classify-file', help=classify_file.__doc__) + classify_text_parser.add_argument('gcs_uri') + sentiment_entities_text_parser = subparsers.add_parser( 'sentiment-entities-text', help=entity_sentiment_text.__doc__) sentiment_entities_text_parser.add_argument('text') @@ -309,3 +358,7 @@ def entity_sentiment_file(gcs_uri): entity_sentiment_text(args.text) elif args.command == 'sentiment-entities-file': entity_sentiment_file(args.gcs_uri) + elif args.command == 'classify-text': + classify_text(args.text) + elif args.command == 'classify-file': + classify_file(args.gcs_uri) diff --git a/language/snippets/cloud-client/v1/snippets_test.py b/language/snippets/cloud-client/v1/snippets_test.py index 168701dc6667..27fbee24d92d 100644 --- a/language/snippets/cloud-client/v1/snippets_test.py +++ b/language/snippets/cloud-client/v1/snippets_test.py @@ -19,6 +19,7 @@ BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] TEST_FILE_URL = 'gs://{}/text.txt'.format(BUCKET) +LONG_TEST_FILE_URL = 'gs://{}/android_text.txt'.format(BUCKET) def test_sentiment_text(capsys): @@ -77,3 +78,20 @@ def test_sentiment_entities_utf(capsys): 'foo→bar') out, _ = capsys.readouterr() assert 'Begin Offset : 4' in out + + +def test_classify_text(capsys): + snippets.classify_text( + 'Android is a mobile operating system developed by Google, ' + 'based on the Linux kernel and designed primarily for touchscreen ' + 'mobile devices such as smartphones and tablets.') + out, _ = capsys.readouterr() + assert 'name' in out + assert '/Computers & Electronics' in out + + +def test_classify_file(capsys): + snippets.classify_file(LONG_TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'name' in out + assert '/Computers & Electronics' in out From e675b64aaecd1136a275b24e313bd61a52b1a12f Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Tue, 14 Nov 2017 09:16:36 -0800 Subject: [PATCH 104/323] update to use v1 client [(#1216)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1216) * update to use v1 client * set ensure_ascii=False --- .../classify_text/classify_text_tutorial.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/language/snippets/classify_text/classify_text_tutorial.py b/language/snippets/classify_text/classify_text_tutorial.py index 5d793f50ee48..1ac9e0acb7bd 100644 --- a/language/snippets/classify_text/classify_text_tutorial.py +++ b/language/snippets/classify_text/classify_text_tutorial.py @@ -27,10 +27,7 @@ import json import os -from google.cloud import language_v1beta2 -from google.cloud.language_v1beta2 import enums -from google.cloud.language_v1beta2 import types - +from google.cloud import language import numpy import six # [END classify_text_tutorial_import] @@ -40,11 +37,11 @@ def classify(text, 
verbose=True): """Classify the input text into categories. """ - language_client = language_v1beta2.LanguageServiceClient() + language_client = language.LanguageServiceClient() - document = types.Document( + document = language.types.Document( content=text, - type=enums.Document.Type.PLAIN_TEXT) + type=language.enums.Document.Type.PLAIN_TEXT) response = language_client.classify_text(document) categories = response.categories @@ -90,7 +87,7 @@ def index(path, index_file): print('Failed to process {}'.format(file_path)) with io.open(index_file, 'w', encoding='utf-8') as f: - f.write(json.dumps(result)) + f.write(json.dumps(result, ensure_ascii=False)) print('Texts indexed in file: {}'.format(index_file)) return result From 8e99524d67be2a173a956041508fb50213ff4ccd Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 15 Nov 2017 12:18:33 -0800 Subject: [PATCH 105/323] Auto-update dependencies. [(#1217)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1217) --- language/snippets/api/requirements.txt | 4 ++-- language/snippets/movie_nl/requirements.txt | 4 ++-- language/snippets/ocr_nl/requirements.txt | 4 ++-- language/snippets/syntax_triples/requirements.txt | 4 ++-- language/snippets/tutorial/requirements.txt | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 558e42c22ec8..edd6472fec65 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.0 -google-auth-httplib2==0.0.2 +google-auth==1.2.1 +google-auth-httplib2==0.0.3 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 46c981d54a2a..024960794619 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ google-api-python-client==1.6.4 -google-auth==1.2.0 -google-auth-httplib2==0.0.2 +google-auth==1.2.1 +google-auth-httplib2==0.0.3 requests==2.18.4 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index 558e42c22ec8..edd6472fec65 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.0 -google-auth-httplib2==0.0.2 +google-auth==1.2.1 +google-auth-httplib2==0.0.3 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index 558e42c22ec8..edd6472fec65 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.0 -google-auth-httplib2==0.0.2 +google-auth==1.2.1 +google-auth-httplib2==0.0.3 diff --git a/language/snippets/tutorial/requirements.txt b/language/snippets/tutorial/requirements.txt index 558e42c22ec8..edd6472fec65 100644 --- a/language/snippets/tutorial/requirements.txt +++ b/language/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.0 -google-auth-httplib2==0.0.2 +google-auth==1.2.1 +google-auth-httplib2==0.0.3 From b62c6be3449df63003607a3c3ac4ebc1455d0806 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 6 Dec 2017 14:27:58 -0800 Subject: [PATCH 106/323] Relax regex in ocr_nl sample test [(#1250)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1250) --- 
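The ensure_ascii=False flag added in [PATCH 104/323] above keeps non-ASCII characters readable in the UTF-8 index file instead of writing \uXXXX escapes, which matches the io.open(..., encoding='utf-8') writer in the tutorial. A quick sketch of the difference; the sample record is made up:

import json

record = {'title': u'Les Misérables'}
print(json.dumps(record))                      # {"title": "Les Mis\u00e9rables"}
print(json.dumps(record, ensure_ascii=False))  # {"title": "Les Misérables"}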
language/snippets/ocr_nl/main_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/ocr_nl/main_test.py b/language/snippets/ocr_nl/main_test.py index e4bf2b5174fb..5a8f72f233a4 100755 --- a/language/snippets/ocr_nl/main_test.py +++ b/language/snippets/ocr_nl/main_test.py @@ -97,4 +97,4 @@ def test_main(tmpdir, capsys): stdout, _ = capsys.readouterr() - assert re.search(r'google was found with count', stdout) + assert re.search(r'.* found with count', stdout) From 15ecc88babc42f6a5d73bbcbcb37138991fa52ce Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 7 Dec 2017 10:34:29 -0800 Subject: [PATCH 107/323] Added "Open in Cloud Shell" buttons to README files [(#1254)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1254) --- language/snippets/README.md | 5 ++ language/snippets/api/README.rst | 15 ++++- language/snippets/api/README.rst.in | 2 + language/snippets/classify_text/README.rst | 58 +++++++++---------- language/snippets/classify_text/README.rst.in | 2 + language/snippets/cloud-client/v1/README.rst | 28 +++++++-- .../snippets/cloud-client/v1/README.rst.in | 2 + .../snippets/cloud-client/v1beta2/README.rst | 21 +++++-- .../cloud-client/v1beta2/README.rst.in | 2 + language/snippets/movie_nl/README.md | 5 ++ language/snippets/ocr_nl/README.md | 5 ++ language/snippets/sentiment/README.md | 5 ++ language/snippets/syntax_triples/README.md | 5 ++ language/snippets/tutorial/README.rst | 13 ++++- language/snippets/tutorial/README.rst.in | 2 + 15 files changed, 123 insertions(+), 47 deletions(-) diff --git a/language/snippets/README.md b/language/snippets/README.md index 1e4a6401bbd8..d0ba56915559 100644 --- a/language/snippets/README.md +++ b/language/snippets/README.md @@ -1,5 +1,10 @@ # Google Cloud Natural Language API examples +[![Open in Cloud Shell][shell_img]][shell_link] + +[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png +[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/README.md + This directory contains Python examples that use the [Google Cloud Natural Language API](https://cloud.google.com/natural-language/). diff --git a/language/snippets/api/README.rst b/language/snippets/api/README.rst index e97059a3919c..7434de167bcc 100644 --- a/language/snippets/api/README.rst +++ b/language/snippets/api/README.rst @@ -3,6 +3,10 @@ Google Cloud Natural Language API Python Samples =============================================================================== +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/api/README.rst + + This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. @@ -54,6 +58,10 @@ Samples Analyze syntax +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. 
image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/api/analyze.py;language/api/README.rst + + To run this sample: @@ -63,17 +71,18 @@ To run this sample: $ python analyze.py usage: analyze.py [-h] {entities,sentiment,syntax} text - + Analyzes text using the Google Cloud Natural Language API. - + positional arguments: {entities,sentiment,syntax} text - + optional arguments: -h, --help show this help message and exit + .. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/language/snippets/api/README.rst.in b/language/snippets/api/README.rst.in index 31294fae1960..f3195edf6b42 100644 --- a/language/snippets/api/README.rst.in +++ b/language/snippets/api/README.rst.in @@ -18,3 +18,5 @@ samples: - name: Analyze syntax file: analyze.py show_help: true + +folder: language/api \ No newline at end of file diff --git a/language/snippets/classify_text/README.rst b/language/snippets/classify_text/README.rst index 0a61591bc22e..2857e0312309 100644 --- a/language/snippets/classify_text/README.rst +++ b/language/snippets/classify_text/README.rst @@ -3,6 +3,10 @@ Google Cloud Natural Language API Python Samples =============================================================================== +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/classify_text/README.rst + + This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers. This tutorial demostrates how to use the `classify_text` method to classify content category of text files, and use the result to compare texts by their similarity to each other. See the `tutorial page`_ for details about this sample. @@ -21,39 +25,20 @@ Setup Authentication ++++++++++++++ -Authentication is typically done through `Application Default Credentials`_, -which means you do not have to change the code to authenticate as long as -your environment has credentials. You have a few options for setting up -authentication: - -#. When running locally, use the `Google Cloud SDK`_ - - .. code-block:: bash - - gcloud auth application-default login - - -#. When running on App Engine or Compute Engine, credentials are already - set-up. However, you may need to configure your Compute Engine instance - with `additional scopes`_. - -#. You can create a `Service Account key file`_. This file can be used to - authenticate to Google Cloud Platform services from any environment. To use - the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to - the path to the key file, for example: - - .. code-block:: bash - - export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. -.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow -.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using -.. 
_Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started Install Dependencies ++++++++++++++++++++ -#. Install `pip`_ and `virtualenv`_ if you do not already have them. +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. @@ -77,6 +62,10 @@ Samples Classify Text Tutorial +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/classify_text/classify_text_tutorial.py;language/classify_text/README.rst + + To run this sample: @@ -87,9 +76,13 @@ To run this sample: usage: classify_text_tutorial.py [-h] {classify,index,query,query-category} ... - - Using the classify_text method to cluster texts. - + + Using the classify_text method to find content categories of text files, + Then use the content category labels to compare text similarity. + + For more information, see the tutorial page at + https://cloud.google.com/natural-language/docs/classify-text-tutorial. + positional arguments: {classify,index,query,query-category} classify Classify the input text into categories. @@ -101,13 +94,14 @@ To run this sample: the query label. The list of all available labels: https://cloud.google.com/natural- language/docs/categories - + optional arguments: -h, --help show this help message and exit + The client library ------------------------------------------------------------------------------- diff --git a/language/snippets/classify_text/README.rst.in b/language/snippets/classify_text/README.rst.in index 42e8f061a5d7..14ee6dc9aa45 100644 --- a/language/snippets/classify_text/README.rst.in +++ b/language/snippets/classify_text/README.rst.in @@ -24,3 +24,5 @@ samples: show_help: true cloud_client_library: true + +folder: language/classify_text \ No newline at end of file diff --git a/language/snippets/cloud-client/v1/README.rst b/language/snippets/cloud-client/v1/README.rst index cf4c07d20b99..2e93e9af13fd 100644 --- a/language/snippets/cloud-client/v1/README.rst +++ b/language/snippets/cloud-client/v1/README.rst @@ -3,6 +3,10 @@ Google Cloud Natural Language API Python Samples =============================================================================== +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/README.rst + + This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. - See the `migration guide`_ for information about migrating to Python client library v0.26.1. 
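The README changes above replace the step-by-step Application Default Credentials walkthrough with a pointer to the Authentication Getting Started Guide; the mechanism itself is unchanged, and the client still resolves credentials automatically once GOOGLE_APPLICATION_CREDENTIALS is set. A minimal sketch, assuming a service account key file (the path is a placeholder):

import os
from google.cloud import language

# Placeholder path; point this at a real service account key file.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/path/to/service_account.json'

# No explicit credentials argument: Application Default Credentials apply.
client = language.LanguageServiceClient()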
@@ -58,6 +62,10 @@ Samples Quickstart +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/quickstart.py;language/cloud-client/v1/README.rst + + To run this sample: @@ -70,6 +78,10 @@ To run this sample: Snippets +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/snippets.py;language/cloud-client/v1/README.rst + + To run this sample: @@ -79,17 +91,20 @@ To run this sample: $ python snippets.py usage: snippets.py [-h] - {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} ... - + This application demonstrates how to perform basic operations with the Google Cloud Natural Language API - + For more information, the documentation at https://cloud.google.com/natural-language/docs. - + positional arguments: - {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + classify-text Classifies content categories of the provided text. + classify-file Classifies content categories of the text in a Google + Cloud Storage file. sentiment-entities-text Detects entity sentiment in the provided text. sentiment-entities-file @@ -104,13 +119,14 @@ To run this sample: syntax-text Detects syntax in the text. syntax-file Detects syntax in the file located in Google Cloud Storage. - + optional arguments: -h, --help show this help message and exit + The client library ------------------------------------------------------------------------------- diff --git a/language/snippets/cloud-client/v1/README.rst.in b/language/snippets/cloud-client/v1/README.rst.in index 1b4855fb4f38..06b7ff3e1953 100644 --- a/language/snippets/cloud-client/v1/README.rst.in +++ b/language/snippets/cloud-client/v1/README.rst.in @@ -28,3 +28,5 @@ samples: show_help: true cloud_client_library: true + +folder: language/cloud-client/v1 \ No newline at end of file diff --git a/language/snippets/cloud-client/v1beta2/README.rst b/language/snippets/cloud-client/v1beta2/README.rst index f2ec309a64e0..aa4ce4524827 100644 --- a/language/snippets/cloud-client/v1beta2/README.rst +++ b/language/snippets/cloud-client/v1beta2/README.rst @@ -3,6 +3,10 @@ Google Cloud Natural Language API Python Samples =============================================================================== +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/README.rst + + This directory contains samples for Google Cloud Natural Language API. 
The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. - See the `migration guide`_ for information about migrating to Python client library v0.26.1. @@ -58,6 +62,10 @@ Samples Quickstart +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/quickstart.py;language/cloud-client/v1beta2/README.rst + + To run this sample: @@ -70,6 +78,10 @@ To run this sample: Snippets +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/snippets.py;language/cloud-client/v1beta2/README.rst + + To run this sample: @@ -81,13 +93,13 @@ To run this sample: usage: snippets.py [-h] {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} ... - + This application demonstrates how to perform basic operations with the Google Cloud Natural Language API - + For more information, the documentation at https://cloud.google.com/natural-language/docs. - + positional arguments: {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} classify-text Classifies content categories of the provided text. @@ -107,13 +119,14 @@ To run this sample: syntax-text Detects syntax in the text. syntax-file Detects syntax in the file located in Google Cloud Storage. - + optional arguments: -h, --help show this help message and exit + The client library ------------------------------------------------------------------------------- diff --git a/language/snippets/cloud-client/v1beta2/README.rst.in b/language/snippets/cloud-client/v1beta2/README.rst.in index 1b4855fb4f38..d11667458a5a 100644 --- a/language/snippets/cloud-client/v1beta2/README.rst.in +++ b/language/snippets/cloud-client/v1beta2/README.rst.in @@ -28,3 +28,5 @@ samples: show_help: true cloud_client_library: true + +folder: language/cloud-client/v1beta2 \ No newline at end of file diff --git a/language/snippets/movie_nl/README.md b/language/snippets/movie_nl/README.md index 687a6c4058ab..95c05dbbce12 100644 --- a/language/snippets/movie_nl/README.md +++ b/language/snippets/movie_nl/README.md @@ -1,4 +1,9 @@ # Introduction + +[![Open in Cloud Shell][shell_img]][shell_link] + +[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png +[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/movie_nl/README.md This sample is an application of the Google Cloud Platform Natural Language API. 
It uses the [imdb movie reviews data set](https://www.cs.cornell.edu/people/pabo/movie-review-data/) from [Cornell University](http://www.cs.cornell.edu/) and performs sentiment & entity diff --git a/language/snippets/ocr_nl/README.md b/language/snippets/ocr_nl/README.md index 189e93979010..a34ff3179c4c 100644 --- a/language/snippets/ocr_nl/README.md +++ b/language/snippets/ocr_nl/README.md @@ -1,4 +1,9 @@ + +[![Open in Cloud Shell][shell_img]][shell_link] + +[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png +[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/ocr_nl/README.md # Using the Cloud Natural Language API to analyze image text found with Cloud Vision This example uses the [Cloud Vision API](https://cloud.google.com/vision/) to diff --git a/language/snippets/sentiment/README.md b/language/snippets/sentiment/README.md index 955629931ee0..313817ef2fe1 100644 --- a/language/snippets/sentiment/README.md +++ b/language/snippets/sentiment/README.md @@ -1,5 +1,10 @@ # Introduction +[![Open in Cloud Shell][shell_img]][shell_link] + +[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png +[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/sentiment/README.md + This sample contains the code referenced in the [Sentiment Analysis Tutorial](http://cloud.google.com/natural-language/docs/sentiment-tutorial) within the Google Cloud Natural Language API Documentation. A full walkthrough of this sample diff --git a/language/snippets/syntax_triples/README.md b/language/snippets/syntax_triples/README.md index 1342ee65289d..551057e7217d 100644 --- a/language/snippets/syntax_triples/README.md +++ b/language/snippets/syntax_triples/README.md @@ -1,5 +1,10 @@ # Using the Cloud Natural Language API to find subject-verb-object triples in text +[![Open in Cloud Shell][shell_img]][shell_link] + +[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png +[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/syntax_triples/README.md + This example finds subject-verb-object triples in a given piece of text using syntax analysis capabilities of [Cloud Natural Language API](https://cloud.google.com/natural-language/). diff --git a/language/snippets/tutorial/README.rst b/language/snippets/tutorial/README.rst index 651b219cad1d..202381a6f350 100644 --- a/language/snippets/tutorial/README.rst +++ b/language/snippets/tutorial/README.rst @@ -3,6 +3,10 @@ Google Cloud Natural Language Tutorial Python Samples =============================================================================== +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/tutorial/README.rst + + This directory contains samples for Google Cloud Natural Language Tutorial. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. 
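The sentiment tutorial referenced above boils down to a single analyze_sentiment call on the review text. A minimal sketch against the 1.0.x client pinned elsewhere in this history; the review sentence is invented:

from google.cloud import language

client = language.LanguageServiceClient()
document = language.types.Document(
    content='The movie was delightful from start to finish.',
    type=language.enums.Document.Type.PLAIN_TEXT)

sentiment = client.analyze_sentiment(document=document).document_sentiment
# score ranges over [-1.0, 1.0]; magnitude measures overall emotional strength.
print('Score: {}, Magnitude: {}'.format(sentiment.score, sentiment.magnitude))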
@@ -54,6 +58,10 @@ Samples Language tutorial +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/tutorial/tutorial.py;language/tutorial/README.rst + + To run this sample: @@ -63,16 +71,17 @@ To run this sample: $ python tutorial.py usage: tutorial.py [-h] movie_review_filename - + positional arguments: movie_review_filename The filename of the movie review you'd like to analyze. - + optional arguments: -h, --help show this help message and exit + .. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/language/snippets/tutorial/README.rst.in b/language/snippets/tutorial/README.rst.in index aea593b277f4..945c701e510e 100644 --- a/language/snippets/tutorial/README.rst.in +++ b/language/snippets/tutorial/README.rst.in @@ -18,3 +18,5 @@ samples: - name: Language tutorial file: tutorial.py show_help: true + +folder: language/tutorial \ No newline at end of file From 31950bd35417ab14624f87bfac1c21dca1f19be9 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 8 Jan 2018 08:45:17 -0800 Subject: [PATCH 108/323] Auto-update dependencies. [(#1304)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1304) --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 701a534230c7..76c6d7320ecb 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.0.0 -numpy==1.13.3 +numpy==1.14.0 From 2c9fbb587eda7b249125de199b6dc2dec80af882 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 10 Jan 2018 09:07:00 -0800 Subject: [PATCH 109/323] Auto-update dependencies. 
[(#1309)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1309) --- language/snippets/api/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- language/snippets/ocr_nl/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 2 +- language/snippets/tutorial/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index edd6472fec65..8bb83f801683 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.1 +google-auth==1.3.0 google-auth-httplib2==0.0.3 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 024960794619..82804723d43d 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ google-api-python-client==1.6.4 -google-auth==1.2.1 +google-auth==1.3.0 google-auth-httplib2==0.0.3 requests==2.18.4 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index edd6472fec65..8bb83f801683 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.1 +google-auth==1.3.0 google-auth-httplib2==0.0.3 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index edd6472fec65..8bb83f801683 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.1 +google-auth==1.3.0 google-auth-httplib2==0.0.3 diff --git a/language/snippets/tutorial/requirements.txt b/language/snippets/tutorial/requirements.txt index edd6472fec65..8bb83f801683 100644 --- a/language/snippets/tutorial/requirements.txt +++ b/language/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.1 +google-auth==1.3.0 google-auth-httplib2==0.0.3 From ea1e3dd070ce6d7b1671c4150652e0be8d4ba3c6 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 1 Feb 2018 22:20:35 -0800 Subject: [PATCH 110/323] Auto-update dependencies. 
[(#1320)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1320) --- language/snippets/api/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- language/snippets/ocr_nl/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 2 +- language/snippets/tutorial/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 8bb83f801683..4bafec3019aa 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.4 +google-api-python-client==1.6.5 google-auth==1.3.0 google-auth-httplib2==0.0.3 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 82804723d43d..60f1bb0a661d 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ -google-api-python-client==1.6.4 +google-api-python-client==1.6.5 google-auth==1.3.0 google-auth-httplib2==0.0.3 requests==2.18.4 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index 8bb83f801683..4bafec3019aa 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.4 +google-api-python-client==1.6.5 google-auth==1.3.0 google-auth-httplib2==0.0.3 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index 8bb83f801683..4bafec3019aa 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.4 +google-api-python-client==1.6.5 google-auth==1.3.0 google-auth-httplib2==0.0.3 diff --git a/language/snippets/tutorial/requirements.txt b/language/snippets/tutorial/requirements.txt index 8bb83f801683..4bafec3019aa 100644 --- a/language/snippets/tutorial/requirements.txt +++ b/language/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.4 +google-api-python-client==1.6.5 google-auth==1.3.0 google-auth-httplib2==0.0.3 From aaff669bb732a0f5f397942c5b1da14ba9c03da1 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Fri, 9 Feb 2018 10:46:48 -0800 Subject: [PATCH 111/323] Auto-update dependencies. 
[(#1355)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1355) --- language/snippets/api/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- language/snippets/ocr_nl/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 2 +- language/snippets/tutorial/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 4bafec3019aa..3578404331b4 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.3.0 +google-auth==1.4.0 google-auth-httplib2==0.0.3 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 60f1bb0a661d..07059ddca484 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ google-api-python-client==1.6.5 -google-auth==1.3.0 +google-auth==1.4.0 google-auth-httplib2==0.0.3 requests==2.18.4 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index 4bafec3019aa..3578404331b4 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.3.0 +google-auth==1.4.0 google-auth-httplib2==0.0.3 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index 4bafec3019aa..3578404331b4 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.3.0 +google-auth==1.4.0 google-auth-httplib2==0.0.3 diff --git a/language/snippets/tutorial/requirements.txt b/language/snippets/tutorial/requirements.txt index 4bafec3019aa..3578404331b4 100644 --- a/language/snippets/tutorial/requirements.txt +++ b/language/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.3.0 +google-auth==1.4.0 google-auth-httplib2==0.0.3 From 39793ecb1029c94c4d021f21031aa476c203757f Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 26 Feb 2018 09:03:37 -0800 Subject: [PATCH 112/323] Auto-update dependencies. 
[(#1359)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1359) --- language/snippets/api/requirements.txt | 2 +- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- language/snippets/ocr_nl/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 2 +- language/snippets/tutorial/requirements.txt | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 3578404331b4..500e732f5322 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.4.0 +google-auth==1.4.1 google-auth-httplib2==0.0.3 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 76c6d7320ecb..da0faa996317 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.0.0 -numpy==1.14.0 +numpy==1.14.1 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 07059ddca484..06bc7afa8306 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ google-api-python-client==1.6.5 -google-auth==1.4.0 +google-auth==1.4.1 google-auth-httplib2==0.0.3 requests==2.18.4 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index 3578404331b4..500e732f5322 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.4.0 +google-auth==1.4.1 google-auth-httplib2==0.0.3 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index 3578404331b4..500e732f5322 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.4.0 +google-auth==1.4.1 google-auth-httplib2==0.0.3 diff --git a/language/snippets/tutorial/requirements.txt b/language/snippets/tutorial/requirements.txt index 3578404331b4..500e732f5322 100644 --- a/language/snippets/tutorial/requirements.txt +++ b/language/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.4.0 +google-auth==1.4.1 google-auth-httplib2==0.0.3 From ff89cd3a06c3bebcca41e6ff4285cead70aca082 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 5 Mar 2018 12:28:55 -0800 Subject: [PATCH 113/323] Auto-update dependencies. [(#1377)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1377) * Auto-update dependencies. 
* Update requirements.txt --- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/cloud-client/v1beta2/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index da0faa996317..c858e7a8775d 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==1.0.0 +google-cloud-language==1.0.1 numpy==1.14.1 diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index b5848a34cec1..5085e2cd98ea 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.0 +google-cloud-language==1.0.1 diff --git a/language/snippets/cloud-client/v1beta2/requirements.txt b/language/snippets/cloud-client/v1beta2/requirements.txt index b5848a34cec1..5085e2cd98ea 100644 --- a/language/snippets/cloud-client/v1beta2/requirements.txt +++ b/language/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.0 +google-cloud-language==1.0.1 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index b5848a34cec1..5085e2cd98ea 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.0 +google-cloud-language==1.0.1 From 9205fecbbf0385dfee84fb5772e9c8cd08654791 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 13 Mar 2018 09:01:09 -0700 Subject: [PATCH 114/323] Auto-update dependencies. [(#1397)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1397) --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index c858e7a8775d..5b7339a04f37 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.0.1 -numpy==1.14.1 +numpy==1.14.2 From 58b3511d05c8a2dfd9d70e93466b520649a85a18 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 2 Apr 2018 02:51:10 -0700 Subject: [PATCH 115/323] Auto-update dependencies. 
--- language/snippets/api/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 2 +- language/snippets/ocr_nl/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 2 +- language/snippets/tutorial/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 500e732f5322..e5f3a6c5cd6b 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.5 +google-api-python-client==1.6.6 google-auth==1.4.1 google-auth-httplib2==0.0.3 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index 06bc7afa8306..cbe4d142a056 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ -google-api-python-client==1.6.5 +google-api-python-client==1.6.6 google-auth==1.4.1 google-auth-httplib2==0.0.3 requests==2.18.4 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index 500e732f5322..e5f3a6c5cd6b 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.5 +google-api-python-client==1.6.6 google-auth==1.4.1 google-auth-httplib2==0.0.3 diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt index 500e732f5322..e5f3a6c5cd6b 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.5 +google-api-python-client==1.6.6 google-auth==1.4.1 google-auth-httplib2==0.0.3 diff --git a/language/snippets/tutorial/requirements.txt b/language/snippets/tutorial/requirements.txt index 500e732f5322..e5f3a6c5cd6b 100644 --- a/language/snippets/tutorial/requirements.txt +++ b/language/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.5 +google-api-python-client==1.6.6 google-auth==1.4.1 google-auth-httplib2==0.0.3 From 207e84fac049c78c5d944fe29e68da12299b7698 Mon Sep 17 00:00:00 2001 From: chenyumic Date: Fri, 6 Apr 2018 22:57:36 -0700 Subject: [PATCH 116/323] Regenerate the README files and fix the Open in Cloud Shell link for some samples [(#1441)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1441) --- language/snippets/api/README.rst | 4 ++-- language/snippets/classify_text/README.rst | 4 ++-- language/snippets/cloud-client/v1/README.rst | 6 +++--- language/snippets/cloud-client/v1beta2/README.rst | 6 +++--- language/snippets/tutorial/README.rst | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/language/snippets/api/README.rst b/language/snippets/api/README.rst index 7434de167bcc..8ebbe55a279e 100644 --- a/language/snippets/api/README.rst +++ b/language/snippets/api/README.rst @@ -12,7 +12,7 @@ This directory contains samples for Google Cloud Natural Language API. The `Goog -.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ Setup ------------------------------------------------------------------------------- @@ -59,7 +59,7 @@ Analyze syntax +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. 
image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/api/analyze.py;language/api/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/api/analyze.py,language/api/README.rst diff --git a/language/snippets/classify_text/README.rst b/language/snippets/classify_text/README.rst index 2857e0312309..2ede54da8d6a 100644 --- a/language/snippets/classify_text/README.rst +++ b/language/snippets/classify_text/README.rst @@ -16,7 +16,7 @@ This tutorial demostrates how to use the `classify_text` method to classify cont -.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ Setup ------------------------------------------------------------------------------- @@ -63,7 +63,7 @@ Classify Text Tutorial +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/classify_text/classify_text_tutorial.py;language/classify_text/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/classify_text/classify_text_tutorial.py,language/classify_text/README.rst diff --git a/language/snippets/cloud-client/v1/README.rst b/language/snippets/cloud-client/v1/README.rst index 2e93e9af13fd..7d727df6d81a 100644 --- a/language/snippets/cloud-client/v1/README.rst +++ b/language/snippets/cloud-client/v1/README.rst @@ -16,7 +16,7 @@ This directory contains samples for Google Cloud Natural Language API. The `Goog -.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ Setup ------------------------------------------------------------------------------- @@ -63,7 +63,7 @@ Quickstart +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/quickstart.py;language/cloud-client/v1/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/quickstart.py,language/cloud-client/v1/README.rst @@ -79,7 +79,7 @@ Snippets +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. 
image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/snippets.py;language/cloud-client/v1/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/snippets.py,language/cloud-client/v1/README.rst diff --git a/language/snippets/cloud-client/v1beta2/README.rst b/language/snippets/cloud-client/v1beta2/README.rst index aa4ce4524827..e981c2481e03 100644 --- a/language/snippets/cloud-client/v1beta2/README.rst +++ b/language/snippets/cloud-client/v1beta2/README.rst @@ -16,7 +16,7 @@ This directory contains samples for Google Cloud Natural Language API. The `Goog -.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ Setup ------------------------------------------------------------------------------- @@ -63,7 +63,7 @@ Quickstart +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/quickstart.py;language/cloud-client/v1beta2/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/quickstart.py,language/cloud-client/v1beta2/README.rst @@ -79,7 +79,7 @@ Snippets +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/snippets.py;language/cloud-client/v1beta2/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/snippets.py,language/cloud-client/v1beta2/README.rst diff --git a/language/snippets/tutorial/README.rst b/language/snippets/tutorial/README.rst index 202381a6f350..08e7ee90b745 100644 --- a/language/snippets/tutorial/README.rst +++ b/language/snippets/tutorial/README.rst @@ -12,7 +12,7 @@ This directory contains samples for Google Cloud Natural Language Tutorial. The -.. _Google Cloud Natural Language Tutorial: https://cloud.google.com/natural-language/docs/ +.. _Google Cloud Natural Language Tutorial: https://cloud.google.com/natural-language/docs/ Setup ------------------------------------------------------------------------------- @@ -59,7 +59,7 @@ Language tutorial +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. 
image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/tutorial/tutorial.py;language/tutorial/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/tutorial/tutorial.py,language/tutorial/README.rst From a84db3f405cd12826ff5ad772c60c24fbb7c2b11 Mon Sep 17 00:00:00 2001 From: Frank Natividad Date: Thu, 26 Apr 2018 10:26:41 -0700 Subject: [PATCH 117/323] Update READMEs to fix numbering and add git clone [(#1464)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1464) --- language/snippets/api/README.rst | 10 ++++++++-- language/snippets/classify_text/README.rst | 10 ++++++++-- language/snippets/cloud-client/v1/README.rst | 10 ++++++++-- language/snippets/cloud-client/v1beta2/README.rst | 10 ++++++++-- language/snippets/tutorial/README.rst | 10 ++++++++-- 5 files changed, 40 insertions(+), 10 deletions(-) diff --git a/language/snippets/api/README.rst b/language/snippets/api/README.rst index 8ebbe55a279e..5f4edfd27738 100644 --- a/language/snippets/api/README.rst +++ b/language/snippets/api/README.rst @@ -31,10 +31,16 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + #. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. diff --git a/language/snippets/classify_text/README.rst b/language/snippets/classify_text/README.rst index 2ede54da8d6a..a1112f21d016 100644 --- a/language/snippets/classify_text/README.rst +++ b/language/snippets/classify_text/README.rst @@ -35,10 +35,16 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + #. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. diff --git a/language/snippets/cloud-client/v1/README.rst b/language/snippets/cloud-client/v1/README.rst index 7d727df6d81a..97f79a34e6a8 100644 --- a/language/snippets/cloud-client/v1/README.rst +++ b/language/snippets/cloud-client/v1/README.rst @@ -35,10 +35,16 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. 
code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + #. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. diff --git a/language/snippets/cloud-client/v1beta2/README.rst b/language/snippets/cloud-client/v1beta2/README.rst index e981c2481e03..03400319bebc 100644 --- a/language/snippets/cloud-client/v1beta2/README.rst +++ b/language/snippets/cloud-client/v1beta2/README.rst @@ -35,10 +35,16 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + #. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. diff --git a/language/snippets/tutorial/README.rst b/language/snippets/tutorial/README.rst index 08e7ee90b745..3f83c1a2c640 100644 --- a/language/snippets/tutorial/README.rst +++ b/language/snippets/tutorial/README.rst @@ -31,10 +31,16 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + #. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. 
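[Editor's note] Every README this series touches assumes working Application Default Credentials on top of the clone/pip/virtualenv steps above. A minimal sketch for checking that before running any sample; the print format is illustrative, and it assumes only the google-auth package that the requirements files pin directly or pull in transitively:

    import google.auth

    # Resolves Application Default Credentials from the environment
    # (GOOGLE_APPLICATION_CREDENTIALS, gcloud user credentials, or the
    # GCE metadata server) and reports the default project.
    credentials, project_id = google.auth.default()
    print("Authenticated; default project: {}".format(project_id))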
From 1e3de10ae1124d3df85c744c18aeb5b69a10bfb3 Mon Sep 17 00:00:00 2001 From: Torry Yang Date: Fri, 20 Jul 2018 16:24:34 -0700 Subject: [PATCH 118/323] automl beta [(#1575)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1575) * automl initial commit * lint * fix import groupings * add requirements.txt * address review comments --- .../automl/automl_natural_language_dataset.py | 297 +++++++++++++ .../automl/automl_natural_language_model.py | 392 ++++++++++++++++++ .../automl/automl_natural_language_predict.py | 85 ++++ language/snippets/automl/dataset_test.py | 71 ++++ language/snippets/automl/model_test.py | 80 ++++ language/snippets/automl/predict_test.py | 31 ++ language/snippets/automl/requirements.txt | 1 + language/snippets/automl/resources/test.txt | 1 + 8 files changed, 958 insertions(+) create mode 100755 language/snippets/automl/automl_natural_language_dataset.py create mode 100755 language/snippets/automl/automl_natural_language_model.py create mode 100755 language/snippets/automl/automl_natural_language_predict.py create mode 100644 language/snippets/automl/dataset_test.py create mode 100644 language/snippets/automl/model_test.py create mode 100644 language/snippets/automl/predict_test.py create mode 100644 language/snippets/automl/requirements.txt create mode 100644 language/snippets/automl/resources/test.txt diff --git a/language/snippets/automl/automl_natural_language_dataset.py b/language/snippets/automl/automl_natural_language_dataset.py new file mode 100755 index 000000000000..7793d4a60e55 --- /dev/null +++ b/language/snippets/automl/automl_natural_language_dataset.py @@ -0,0 +1,297 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform basic operations on Dataset +with the Google AutoML Natural Language API. + +For more information, see the tutorial page at +https://cloud.google.com/natural-language/automl/docs/ +""" + +import argparse +import os + + +def create_dataset(project_id, compute_region, dataset_name, multilabel=False): + """Create a dataset.""" + # [START automl_natural_language_create_dataset] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # dataset_name = 'DATASET_NAME_HERE' + # multilabel = True for multilabel or False for multiclass + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # A resource that represents Google Cloud Platform location. + project_location = client.location_path(project_id, compute_region) + + # Classification type is assigned based on multilabel value. + classification_type = "MULTICLASS" + if multilabel: + classification_type = "MULTILABEL" + + # Specify the text classification type for the dataset. + dataset_metadata = {"classification_type": classification_type} + + # Set dataset name and metadata. 
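+ # (Editor's note, not part of the generated sample) The request body
+ # below pairs a human-readable display_name with the classification
+ # metadata assembled above, which fixes whether the dataset is treated
+ # as MULTICLASS or MULTILABEL.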
+ my_dataset = { + "display_name": dataset_name, + "text_classification_dataset_metadata": dataset_metadata, + } + + # Create a dataset with the dataset metadata in the region. + dataset = client.create_dataset(project_location, my_dataset) + + # Display the dataset information. + print("Dataset name: {}".format(dataset.name)) + print("Dataset id: {}".format(dataset.name.split("/")[-1])) + print("Dataset display name: {}".format(dataset.display_name)) + print("Text classification dataset metadata:") + print("\t{}".format(dataset.text_classification_dataset_metadata)) + print("Dataset example count: {}".format(dataset.example_count)) + print("Dataset create time:") + print("\tseconds: {}".format(dataset.create_time.seconds)) + print("\tnanos: {}".format(dataset.create_time.nanos)) + + # [END automl_natural_language_create_dataset] + + +def list_datasets(project_id, compute_region, filter_): + """List all datasets.""" + # [START automl_natural_language_list_datasets] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # filter_ = 'filter expression here' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # A resource that represents Google Cloud Platform location. + project_location = client.location_path(project_id, compute_region) + + # List all the datasets available in the region by applying filter. + response = client.list_datasets(project_location, filter_) + + print("List of datasets:") + for dataset in response: + # Display the dataset information. + print("Dataset name: {}".format(dataset.name)) + print("Dataset id: {}".format(dataset.name.split("/")[-1])) + print("Dataset display name: {}".format(dataset.display_name)) + print("Text classification dataset metadata:") + print("\t{}".format(dataset.text_classification_dataset_metadata)) + print("Dataset example count: {}".format(dataset.example_count)) + print("Dataset create time:") + print("\tseconds: {}".format(dataset.create_time.seconds)) + print("\tnanos: {}".format(dataset.create_time.nanos)) + + # [END automl_natural_language_list_datasets] + + +def get_dataset(project_id, compute_region, dataset_id): + """Get the dataset.""" + # [START automl_natural_language_get_dataset] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # dataset_id = 'DATASET_ID_HERE' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the dataset + dataset_full_id = client.dataset_path( + project_id, compute_region, dataset_id + ) + + # Get complete detail of the dataset. + dataset = client.get_dataset(dataset_full_id) + + # Display the dataset information. 
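+ # (Editor's note) dataset.name is the full resource path,
+ # projects/<project>/locations/<region>/datasets/<id>, so the sample
+ # recovers the bare dataset id by taking the last path segment.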
+ print("Dataset name: {}".format(dataset.name)) + print("Dataset id: {}".format(dataset.name.split("/")[-1])) + print("Dataset display name: {}".format(dataset.display_name)) + print("Text classification dataset metadata:") + print("\t{}".format(dataset.text_classification_dataset_metadata)) + print("Dataset example count: {}".format(dataset.example_count)) + print("Dataset create time:") + print("\tseconds: {}".format(dataset.create_time.seconds)) + print("\tnanos: {}".format(dataset.create_time.nanos)) + + # [END automl_natural_language_get_dataset] + + +def import_data(project_id, compute_region, dataset_id, path): + """Import labelled items.""" + # [START automl_natural_language_import_data] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # dataset_id = 'DATASET_ID_HERE' + # path = 'gs://path/to/file.csv' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the dataset. + dataset_full_id = client.dataset_path( + project_id, compute_region, dataset_id + ) + + # Get the multiple Google Cloud Storage URIs. + input_uris = path.split(",") + input_config = {"gcs_source": {"input_uris": input_uris}} + + # Import the dataset from the input URI. + response = client.import_data(dataset_full_id, input_config) + + print("Processing import...") + # synchronous check of operation status. + print("Data imported. {}".format(response.result())) + + # [END automl_natural_language_import_data] + + +def export_data(project_id, compute_region, dataset_id, output_uri): + """Export a dataset to a Google Cloud Storage bucket.""" + # [START automl_natural_language_export_data] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # dataset_id = 'DATASET_ID_HERE' + # output_uri: 'gs://location/to/export/data' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the dataset. + dataset_full_id = client.dataset_path( + project_id, compute_region, dataset_id + ) + + # Set the output URI + output_config = {"gcs_destination": {"output_uri_prefix": output_uri}} + + # Export the data to the output URI. + response = client.export_data(dataset_full_id, output_config) + + print("Processing export...") + # synchronous check of operation status. + print("Data exported. {}".format(response.result())) + + # [END automl_natural_language_export_data] + + +def delete_dataset(project_id, compute_region, dataset_id): + """Delete a dataset.""" + # [START automl_natural_language_delete_dataset] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # dataset_id = 'DATASET_ID_HERE' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the dataset. + dataset_full_id = client.dataset_path( + project_id, compute_region, dataset_id + ) + + # Delete a dataset. + response = client.delete_dataset(dataset_full_id) + + # synchronous check of operation status. + print("Dataset deleted. 
{}".format(response.result())) + + # [END automl_natural_language_delete_dataset] + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + subparsers = parser.add_subparsers(dest="command") + + create_dataset_parser = subparsers.add_parser( + "create_dataset", help=create_dataset.__doc__ + ) + create_dataset_parser.add_argument("dataset_name") + create_dataset_parser.add_argument( + "multilabel", nargs="?", choices=["False", "True"], default="False" + ) + + list_datasets_parser = subparsers.add_parser( + "list_datasets", help=list_datasets.__doc__ + ) + list_datasets_parser.add_argument( + "filter_", nargs="?", default="text_classification_dataset_metadata:*" + ) + + get_dataset_parser = subparsers.add_parser( + "get_dataset", help=get_dataset.__doc__ + ) + get_dataset_parser.add_argument("dataset_id") + + import_data_parser = subparsers.add_parser( + "import_data", help=import_data.__doc__ + ) + import_data_parser.add_argument("dataset_id") + import_data_parser.add_argument("path") + + export_data_parser = subparsers.add_parser( + "export_data", help=export_data.__doc__ + ) + export_data_parser.add_argument("dataset_id") + export_data_parser.add_argument("output_uri") + + delete_dataset_parser = subparsers.add_parser( + "delete_dataset", help=delete_dataset.__doc__ + ) + delete_dataset_parser.add_argument("dataset_id") + + project_id = os.environ["PROJECT_ID"] + compute_region = os.environ["REGION_NAME"] + + args = parser.parse_args() + + if args.command == "create_dataset": + multilabel = True if args.multilabel == "True" else False + create_dataset( + project_id, compute_region, args.dataset_name, multilabel + ) + if args.command == "list_datasets": + list_datasets(project_id, compute_region, args.filter_) + if args.command == "get_dataset": + get_dataset(project_id, compute_region, args.dataset_id) + if args.command == "import_data": + import_data(project_id, compute_region, args.dataset_id, args.path) + if args.command == "export_data": + export_data( + project_id, compute_region, args.dataset_id, args.output_uri + ) + if args.command == "delete_dataset": + delete_dataset(project_id, compute_region, args.dataset_id) diff --git a/language/snippets/automl/automl_natural_language_model.py b/language/snippets/automl/automl_natural_language_model.py new file mode 100755 index 000000000000..84c0d99e4017 --- /dev/null +++ b/language/snippets/automl/automl_natural_language_model.py @@ -0,0 +1,392 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform basic operations on model +with the Google AutoML Natural Language API. 
+ +For more information, see the tutorial page at +https://cloud.google.com/natural-language/automl/docs/ +""" + +import argparse +import os + + +def create_model(project_id, compute_region, dataset_id, model_name): + """Create a model.""" + # [START automl_natural_language_create_model] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # dataset_id = 'DATASET_ID_HERE' + # model_name = 'MODEL_NAME_HERE' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # A resource that represents Google Cloud Platform location. + project_location = client.location_path(project_id, compute_region) + + # Set model name and model metadata for the dataset. + my_model = { + "display_name": model_name, + "dataset_id": dataset_id, + "text_classification_model_metadata": {}, + } + + # Create a model with the model metadata in the region. + response = client.create_model(project_location, my_model) + print("Training operation name: {}".format(response.operation.name)) + print("Training started...") + + # [END automl_natural_language_create_model] + + +def get_operation_status(operation_full_id): + """Get operation status.""" + # [START automl_natural_language_get_operation_status] + # TODO(developer): Uncomment and set the following variables + # operation_full_id = + # 'projects//locations//operations/' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the latest state of a long-running operation. + response = client.transport._operations_client.get_operation( + operation_full_id + ) + + print("Operation status: {}".format(response)) + + # [END automl_natural_language_get_operation_status] + + +def list_models(project_id, compute_region, filter_): + """List all models.""" + # [START automl_natural_language_list_models] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # filter_ = 'DATASET_ID_HERE' + + from google.cloud import automl_v1beta1 as automl + from google.cloud.automl_v1beta1 import enums + + client = automl.AutoMlClient() + + # A resource that represents Google Cloud Platform location. + project_location = client.location_path(project_id, compute_region) + + # List all the models available in the region by applying filter. + response = client.list_models(project_location, filter_) + + print("List of models:") + for model in response: + # Retrieve deployment state. + deployment_state = "" + if model.deployment_state == enums.Model.DeploymentState.DEPLOYED: + deployment_state = "deployed" + else: + deployment_state = "undeployed" + + # Display the model information. 
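+ # (Editor's note) deployment_state was mapped to a readable string
+ # above; create_time is a protobuf Timestamp, which is why its seconds
+ # and nanos components are printed separately rather than as a
+ # formatted date.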
+ print("Model name: {}".format(model.name)) + print("Model id: {}".format(model.name.split("/")[-1])) + print("Model display name: {}".format(model.display_name)) + print("Model create time:") + print("\tseconds: {}".format(model.create_time.seconds)) + print("\tnanos: {}".format(model.create_time.nanos)) + print("Model deployment state: {}".format(deployment_state)) + + # [END automl_natural_language_list_models] + + +def get_model(project_id, compute_region, model_id): + """Get model details.""" + # [START automl_natural_language_get_model] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # model_id = 'MODEL_ID_HERE' + + from google.cloud import automl_v1beta1 as automl + from google.cloud.automl_v1beta1 import enums + + client = automl.AutoMlClient() + + # Get the full path of the model. + model_full_id = client.model_path(project_id, compute_region, model_id) + + # Get complete detail of the model. + model = client.get_model(model_full_id) + + # Retrieve deployment state. + deployment_state = "" + if model.deployment_state == enums.Model.DeploymentState.DEPLOYED: + deployment_state = "deployed" + else: + deployment_state = "undeployed" + + # Display the model information. + print("Model name: {}".format(model.name)) + print("Model id: {}".format(model.name.split("/")[-1])) + print("Model display name: {}".format(model.display_name)) + print("Model create time:") + print("\tseconds: {}".format(model.create_time.seconds)) + print("\tnanos: {}".format(model.create_time.nanos)) + print("Model deployment state: {}".format(deployment_state)) + + # [END automl_natural_language_get_model] + + +def list_model_evaluations(project_id, compute_region, model_id, filter_): + """List model evaluations.""" + # [START automl_natural_language_list_model_evaluations] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # model_id = 'MODEL_ID_HERE' + # filter_ = 'filter expression here' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the model. + model_full_id = client.model_path(project_id, compute_region, model_id) + + # List all the model evaluations in the model by applying filter. + response = client.list_model_evaluations(model_full_id, filter_) + + print("List of model evaluations:") + for element in response: + print(element) + + # [END automl_natural_language_list_model_evaluations] + + +def get_model_evaluation( + project_id, compute_region, model_id, model_evaluation_id +): + """Get model evaluation.""" + # [START automl_natural_language_get_model_evaluation] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # model_id = 'MODEL_ID_HERE' + # model_evaluation_id = 'MODEL_EVALUATION_ID_HERE' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the model evaluation. + model_evaluation_full_id = client.model_evaluation_path( + project_id, compute_region, model_id, model_evaluation_id + ) + + # Get complete detail of the model evaluation. 
+ response = client.get_model_evaluation(model_evaluation_full_id) + + print(response) + + # [END automl_natural_language_get_model_evaluation] + + +def display_evaluation(project_id, compute_region, model_id, filter_): + """Display evaluation.""" + # [START automl_natural_language_display_evaluation] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # model_id = 'MODEL_ID_HERE' + # filter_ = 'filter expression here' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the model. + model_full_id = client.model_path(project_id, compute_region, model_id) + + # List all the model evaluations in the model by applying filter. + response = client.list_model_evaluations(model_full_id, filter_) + + # Iterate through the results. + for element in response: + # There is evaluation for each class in a model and for overall model. + # Get only the evaluation of overall model. + if not element.annotation_spec_id: + model_evaluation_id = element.name.split("/")[-1] + + # Resource name for the model evaluation. + model_evaluation_full_id = client.model_evaluation_path( + project_id, compute_region, model_id, model_evaluation_id + ) + + # Get a model evaluation. + model_evaluation = client.get_model_evaluation(model_evaluation_full_id) + + class_metrics = model_evaluation.classification_evaluation_metrics + confidence_metrics_entries = class_metrics.confidence_metrics_entry + + # Showing model score based on threshold of 0.5 + for confidence_metrics_entry in confidence_metrics_entries: + if confidence_metrics_entry.confidence_threshold == 0.5: + print("Precision and recall are based on a score threshold of 0.5") + print( + "Model Precision: {}%".format( + round(confidence_metrics_entry.precision * 100, 2) + ) + ) + print( + "Model Recall: {}%".format( + round(confidence_metrics_entry.recall * 100, 2) + ) + ) + print( + "Model F1 score: {}%".format( + round(confidence_metrics_entry.f1_score * 100, 2) + ) + ) + print( + "Model Precision@1: {}%".format( + round(confidence_metrics_entry.precision_at1 * 100, 2) + ) + ) + print( + "Model Recall@1: {}%".format( + round(confidence_metrics_entry.recall_at1 * 100, 2) + ) + ) + print( + "Model F1 score@1: {}%".format( + round(confidence_metrics_entry.f1_score_at1 * 100, 2) + ) + ) + + # [END automl_natural_language_display_evaluation] + + +def delete_model(project_id, compute_region, model_id): + """Delete a model.""" + # [START automl_natural_language_delete_model] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # model_id = 'MODEL_ID_HERE' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the model. + model_full_id = client.model_path(project_id, compute_region, model_id) + + # Delete a model. + response = client.delete_model(model_full_id) + + # synchronous check of operation status. + print("Model deleted. 
{}".format(response.result())) + + # [END automl_natural_language_delete_model] + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + subparsers = parser.add_subparsers(dest="command") + + create_model_parser = subparsers.add_parser( + "create_model", help=create_model.__doc__ + ) + create_model_parser.add_argument("dataset_id") + create_model_parser.add_argument("model_name") + + get_operation_status_parser = subparsers.add_parser( + "get_operation_status", help=get_operation_status.__doc__ + ) + get_operation_status_parser.add_argument("operation_full_id") + + list_models_parser = subparsers.add_parser( + "list_models", help=list_models.__doc__ + ) + list_models_parser.add_argument("filter_") + + get_model_parser = subparsers.add_parser( + "get_model", help=get_model_evaluation.__doc__ + ) + get_model_parser.add_argument("model_id") + + list_model_evaluations_parser = subparsers.add_parser( + "list_model_evaluations", help=list_model_evaluations.__doc__ + ) + list_model_evaluations_parser.add_argument("model_id") + list_model_evaluations_parser.add_argument( + "filter_", nargs="?", default="" + ) + + get_model_evaluation_parser = subparsers.add_parser( + "get_model_evaluation", help=get_model_evaluation.__doc__ + ) + get_model_evaluation_parser.add_argument("model_id") + get_model_evaluation_parser.add_argument("model_evaluation_id") + + display_evaluation_parser = subparsers.add_parser( + "display_evaluation", help=display_evaluation.__doc__ + ) + display_evaluation_parser.add_argument("model_id") + display_evaluation_parser.add_argument("filter_", nargs="?", default="") + + delete_model_parser = subparsers.add_parser( + "delete_model", help=delete_model.__doc__ + ) + delete_model_parser.add_argument("model_id") + + project_id = os.environ["PROJECT_ID"] + compute_region = os.environ["REGION_NAME"] + + args = parser.parse_args() + + if args.command == "create_model": + create_model( + project_id, compute_region, args.dataset_id, args.model_name + ) + if args.command == "get_operation_status": + get_operation_status(args.operation_full_id) + if args.command == "list_models": + list_models(project_id, compute_region, args.filter_) + if args.command == "get_model": + get_model(project_id, compute_region, args.model_id) + if args.command == "list_model_evaluations": + list_model_evaluations( + project_id, compute_region, args.model_id, args.filter_ + ) + if args.command == "get_model_evaluation": + get_model_evaluation( + project_id, compute_region, args.model_id, args.model_evaluation_id + ) + if args.command == "display_evaluation": + display_evaluation( + project_id, compute_region, args.model_id, args.filter_ + ) + if args.command == "delete_model": + delete_model(project_id, compute_region, args.model_id) diff --git a/language/snippets/automl/automl_natural_language_predict.py b/language/snippets/automl/automl_natural_language_predict.py new file mode 100755 index 000000000000..0c25e373467b --- /dev/null +++ b/language/snippets/automl/automl_natural_language_predict.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform basic operations on prediction +with the Google AutoML Natural Language API. + +For more information, see the tutorial page at +https://cloud.google.com/natural-language/automl/docs/ +""" + +import argparse +import os + + +def predict(project_id, compute_region, model_id, file_path): + """Classify the content.""" + # [START automl_natural_language_predict] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # model_id = 'MODEL_ID_HERE' + # file_path = '/local/path/to/file' + + from google.cloud import automl_v1beta1 as automl + + automl_client = automl.AutoMlClient() + + # Create client for prediction service. + prediction_client = automl.PredictionServiceClient() + + # Get the full path of the model. + model_full_id = automl_client.model_path( + project_id, compute_region, model_id + ) + + # Read the file content for prediction. + with open(file_path, "rb") as content_file: + snippet = content_file.read() + + # Set the payload by giving the content and type of the file. + payload = {"text_snippet": {"content": snippet, "mime_type": "text/plain"}} + + # params is additional domain-specific parameters. + # currently there is no additional parameters supported. + params = {} + response = prediction_client.predict(model_full_id, payload, params) + print("Prediction results:") + for result in response.payload: + print("Predicted class name: {}".format(result.display_name)) + print("Predicted class score: {}".format(result.classification.score)) + + # [END automl_natural_language_predict] + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + subparsers = parser.add_subparsers(dest="command") + + predict_parser = subparsers.add_parser("predict", help=predict.__doc__) + predict_parser.add_argument("model_id") + predict_parser.add_argument("file_path") + + project_id = os.environ["PROJECT_ID"] + compute_region = os.environ["REGION_NAME"] + + args = parser.parse_args() + + if args.command == "predict": + predict(project_id, compute_region, args.model_id, args.file_path) diff --git a/language/snippets/automl/dataset_test.py b/language/snippets/automl/dataset_test.py new file mode 100644 index 000000000000..41a565c86a2c --- /dev/null +++ b/language/snippets/automl/dataset_test.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
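+# (Editor's note) These tests run against a live project: they read
+# GCLOUD_PROJECT from the environment, drive the create -> import ->
+# delete dataset lifecycle, and assert on the samples' printed output
+# captured through pytest's capsys fixture. The slow marker lets CI
+# deselect the lifecycle test.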
+ +import datetime +import os + +import pytest + +import automl_natural_language_dataset + +project_id = os.environ["GCLOUD_PROJECT"] +compute_region = "us-central1" + + +@pytest.mark.slow +def test_dataset_create_import_delete(capsys): + # create dataset + dataset_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") + automl_natural_language_dataset.create_dataset( + project_id, compute_region, dataset_name + ) + out, _ = capsys.readouterr() + create_dataset_output = out.splitlines() + assert "Dataset id: " in create_dataset_output[1] + + # import data + dataset_id = create_dataset_output[1].split()[2] + data = "gs://{}-vcm/happiness.csv".format(project_id) + automl_natural_language_dataset.import_data( + project_id, compute_region, dataset_id, data + ) + out, _ = capsys.readouterr() + assert "Data imported." in out + + # delete dataset + automl_natural_language_dataset.delete_dataset( + project_id, compute_region, dataset_id + ) + out, _ = capsys.readouterr() + assert "Dataset deleted." in out + + +def test_dataset_list_get(capsys): + # list datasets + automl_natural_language_dataset.list_datasets( + project_id, compute_region, "" + ) + out, _ = capsys.readouterr() + list_dataset_output = out.splitlines() + assert "Dataset id: " in list_dataset_output[2] + + # get dataset + dataset_id = list_dataset_output[2].split()[2] + automl_natural_language_dataset.get_dataset( + project_id, compute_region, dataset_id + ) + out, _ = capsys.readouterr() + assert "Dataset name: " in out diff --git a/language/snippets/automl/model_test.py b/language/snippets/automl/model_test.py new file mode 100644 index 000000000000..4e52604e208a --- /dev/null +++ b/language/snippets/automl/model_test.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
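+# (Editor's note) The create/status test below trains against a
+# hard-coded dataset id, checks the resulting long-running operation's
+# status, and then cancels the operation so no model is actually left
+# training.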
+ +import datetime +import os + +from google.cloud import automl_v1beta1 as automl + +import automl_natural_language_model + +project_id = os.environ["GCLOUD_PROJECT"] +compute_region = "us-central1" + + +def test_model_create_status_delete(capsys): + # create model + client = automl.AutoMlClient() + model_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") + project_location = client.location_path(project_id, compute_region) + my_model = { + "display_name": model_name, + "dataset_id": "2551826603472450019", + "text_classification_model_metadata": {}, + } + response = client.create_model(project_location, my_model) + operation_name = response.operation.name + assert operation_name + + # get operation status + automl_natural_language_model.get_operation_status(operation_name) + out, _ = capsys.readouterr() + assert "Operation status: " in out + + # cancel operation + response.cancel() + + +def test_model_list_get_evaluate(capsys): + # list models + automl_natural_language_model.list_models(project_id, compute_region, "") + out, _ = capsys.readouterr() + list_models_output = out.splitlines() + assert "Model id: " in list_models_output[2] + + # get model + model_id = list_models_output[2].split()[2] + automl_natural_language_model.get_model( + project_id, compute_region, model_id + ) + out, _ = capsys.readouterr() + assert "Model name: " in out + + # list model evaluations + automl_natural_language_model.list_model_evaluations( + project_id, compute_region, model_id, "" + ) + out, _ = capsys.readouterr() + list_evals_output = out.splitlines() + assert "name: " in list_evals_output[1] + + # get model evaluation + model_evaluation_id = list_evals_output[1].split("/")[-1][:-1] + automl_natural_language_model.get_model_evaluation( + project_id, compute_region, model_id, model_evaluation_id + ) + out, _ = capsys.readouterr() + assert "evaluation_metric" in out diff --git a/language/snippets/automl/predict_test.py b/language/snippets/automl/predict_test.py new file mode 100644 index 000000000000..6cf2c69a0e72 --- /dev/null +++ b/language/snippets/automl/predict_test.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
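+# (Editor's note) This test sends resources/test.txt to a pre-trained,
+# hard-coded model and only asserts that "Cheese" appears somewhere in
+# the printed predictions, so it depends on that model already existing
+# in the test project.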
+ +import os + +import automl_natural_language_predict + +project_id = os.environ["GCLOUD_PROJECT"] +compute_region = "us-central1" + + +def test_predict(capsys): + model_id = "3472481026502981088" + automl_natural_language_predict.predict( + project_id, compute_region, model_id, "resources/test.txt" + ) + out, _ = capsys.readouterr() + assert "Cheese" in out diff --git a/language/snippets/automl/requirements.txt b/language/snippets/automl/requirements.txt new file mode 100644 index 000000000000..d1bff72ae9ad --- /dev/null +++ b/language/snippets/automl/requirements.txt @@ -0,0 +1 @@ +google-cloud-automl==0.1.0 diff --git a/language/snippets/automl/resources/test.txt b/language/snippets/automl/resources/test.txt new file mode 100644 index 000000000000..f0dde24bd9b1 --- /dev/null +++ b/language/snippets/automl/resources/test.txt @@ -0,0 +1 @@ +A strong taste of hazlenut and orange From 441819308150a169c9e3a65c8607660866ebf82e Mon Sep 17 00:00:00 2001 From: Torry Yang Date: Mon, 23 Jul 2018 20:57:31 -0700 Subject: [PATCH 119/323] use lcm instead of vcm [(#1597)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1597) --- language/snippets/automl/dataset_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/automl/dataset_test.py b/language/snippets/automl/dataset_test.py index 41a565c86a2c..fe68579fc60e 100644 --- a/language/snippets/automl/dataset_test.py +++ b/language/snippets/automl/dataset_test.py @@ -38,7 +38,7 @@ def test_dataset_create_import_delete(capsys): # import data dataset_id = create_dataset_output[1].split()[2] - data = "gs://{}-vcm/happiness.csv".format(project_id) + data = "gs://{}-lcm/happiness.csv".format(project_id) automl_natural_language_dataset.import_data( project_id, compute_region, dataset_id, data ) From b3b2fdf90922f59cad5bb801d5d2ac40dcb5f561 Mon Sep 17 00:00:00 2001 From: Torry Yang Date: Thu, 2 Aug 2018 17:40:16 -0700 Subject: [PATCH 120/323] skip automl model create/delete test [(#1608)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1608) * skip model create/delete test * add skip reason --- language/snippets/automl/model_test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/language/snippets/automl/model_test.py b/language/snippets/automl/model_test.py index 4e52604e208a..8f484d2a2ad1 100644 --- a/language/snippets/automl/model_test.py +++ b/language/snippets/automl/model_test.py @@ -18,6 +18,7 @@ import os from google.cloud import automl_v1beta1 as automl +import pytest import automl_natural_language_model @@ -25,6 +26,7 @@ compute_region = "us-central1" +@pytest.mark.skip(reason="creates too many models") def test_model_create_status_delete(capsys): # create model client = automl.AutoMlClient() From 768c8114ebe1ffbd8b58341d98afc14b7f01dbde Mon Sep 17 00:00:00 2001 From: Alix Hamilton Date: Tue, 21 Aug 2018 15:16:31 -0400 Subject: [PATCH 121/323] Language region tag update [(#1643)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1643) --- .../classify_text/classify_text_tutorial.py | 32 +++++----- .../snippets/cloud-client/v1/quickstart.py | 8 +-- language/snippets/cloud-client/v1/snippets.py | 58 ++++++++++--------- .../snippets/sentiment/sentiment_analysis.py | 18 +++--- 4 files changed, 60 insertions(+), 56 deletions(-) diff --git a/language/snippets/classify_text/classify_text_tutorial.py b/language/snippets/classify_text/classify_text_tutorial.py index 1ac9e0acb7bd..2ce388cff09a 100644 --- 
a/language/snippets/classify_text/classify_text_tutorial.py +++ b/language/snippets/classify_text/classify_text_tutorial.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# [START classify_text_tutorial] +# [START language_classify_text_tutorial] """Using the classify_text method to find content categories of text files, Then use the content category labels to compare text similarity. @@ -21,7 +21,7 @@ https://cloud.google.com/natural-language/docs/classify-text-tutorial. """ -# [START classify_text_tutorial_import] +# [START language_classify_text_tutorial_imports] import argparse import io import json @@ -30,10 +30,10 @@ from google.cloud import language import numpy import six -# [END classify_text_tutorial_import] +# [END language_classify_text_tutorial_imports] -# [START def_classify] +# [START language_classify_text_tutorial_classify] def classify(text, verbose=True): """Classify the input text into categories. """ @@ -61,10 +61,10 @@ def classify(text, verbose=True): print(u'{:<16}: {}'.format('confidence', category.confidence)) return result -# [END def_classify] +# [END language_classify_text_tutorial_classify] -# [START def_index] +# [START language_classify_text_tutorial_index] def index(path, index_file): """Classify each text file in a directory and write the results to the index_file. @@ -91,10 +91,10 @@ def index(path, index_file): print('Texts indexed in file: {}'.format(index_file)) return result -# [END def_index] +# [END language_classify_text_tutorial_index] -# [START def_split_labels] +# [START language_classify_text_tutorial_split_labels] def split_labels(categories): """The category labels are of the form "/a/b/c" up to three levels, for example "/Computers & Electronics/Software", and these labels @@ -121,10 +121,10 @@ def split_labels(categories): _categories[label] = confidence return _categories -# [END def_split_labels] +# [END language_classify_text_tutorial_split_labels] -# [START def_similarity] +# [START language_classify_text_tutorial_similarity] def similarity(categories1, categories2): """Cosine similarity of the categories treated as sparse vectors.""" categories1 = split_labels(categories1) @@ -143,10 +143,10 @@ def similarity(categories1, categories2): dot += confidence * categories2.get(label, 0.0) return dot / (norm1 * norm2) -# [END def_similarity] +# [END language_classify_text_tutorial_similarity] -# [START def_query] +# [START language_classify_text_tutorial_query] def query(index_file, text, n_top=3): """Find the indexed files that are the most similar to the query text. @@ -176,10 +176,10 @@ def query(index_file, text, n_top=3): print('\n') return similarities -# [END def_query] +# [END language_classify_text_tutorial_query] -# [START def_query_category] +# [START language_classify_text_tutorial_query_category] def query_category(index_file, category_string, n_top=3): """Find the indexed files that are the most similar to the query label. 
@@ -211,7 +211,7 @@ def query_category(index_file, category_string, n_top=3): print('\n') return similarities -# [END def_query_category] +# [END language_classify_text_tutorial_query_category] if __name__ == '__main__': @@ -255,4 +255,4 @@ def query_category(index_file, category_string, n_top=3): query(args.index_file, args.text) if args.command == 'query-category': query_category(args.index_file, args.category) -# [END classify_text_tutorial] +# [END language_classify_text_tutorial] diff --git a/language/snippets/cloud-client/v1/quickstart.py b/language/snippets/cloud-client/v1/quickstart.py index 3c19e395a427..7c075a513b64 100644 --- a/language/snippets/cloud-client/v1/quickstart.py +++ b/language/snippets/cloud-client/v1/quickstart.py @@ -18,16 +18,16 @@ def run_quickstart(): # [START language_quickstart] # Imports the Google Cloud client library - # [START migration_import] + # [START language_python_migration_imports] from google.cloud import language from google.cloud.language import enums from google.cloud.language import types - # [END migration_import] + # [END language_python_migration_imports] # Instantiates a client - # [START migration_client] + # [START language_python_migration_client] client = language.LanguageServiceClient() - # [END migration_client] + # [END language_python_migration_client] # The text to analyze text = u'Hello, world!' diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py index 30b591a40371..3b1c02f9c68f 100644 --- a/language/snippets/cloud-client/v1/snippets.py +++ b/language/snippets/cloud-client/v1/snippets.py @@ -30,7 +30,7 @@ import six -# [START def_sentiment_text] +# [START language_sentiment_text] def sentiment_text(text): """Detects sentiment in the text.""" client = language.LanguageServiceClient() @@ -39,12 +39,12 @@ def sentiment_text(text): text = text.decode('utf-8') # Instantiates a plain text document. - # [START migration_document_text] - # [START migration_analyze_sentiment] + # [START language_python_migration_document_text] + # [START language_python_migration_sentiment_text] document = types.Document( content=text, type=enums.Document.Type.PLAIN_TEXT) - # [END migration_document_text] + # [END language_python_migration_document_text] # Detects sentiment in the document. You can also analyze HTML with: # document.type == enums.Document.Type.HTML @@ -52,21 +52,21 @@ def sentiment_text(text): print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) - # [END migration_analyze_sentiment] -# [END def_sentiment_text] + # [END language_python_migration_sentiment_text] +# [END language_sentiment_text] -# [START def_sentiment_file] +# [START language_sentiment_gcs] def sentiment_file(gcs_uri): """Detects sentiment in the file located in Google Cloud Storage.""" client = language.LanguageServiceClient() # Instantiates a plain text document. - # [START migration_document_gcs_uri] + # [START language_python_migration_document_gcs] document = types.Document( gcs_content_uri=gcs_uri, type=enums.Document.Type.PLAIN_TEXT) - # [END migration_document_gcs_uri] + # [END language_python_migration_document_gcs] # Detects sentiment in the document. 
You can also analyze HTML with: # document.type == enums.Document.Type.HTML @@ -74,10 +74,10 @@ def sentiment_file(gcs_uri): print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) -# [END def_sentiment_file] +# [END language_sentiment_gcs] -# [START def_entities_text] +# [START language_entities_text] def entities_text(text): """Detects entities in the text.""" client = language.LanguageServiceClient() @@ -86,7 +86,7 @@ def entities_text(text): text = text.decode('utf-8') # Instantiates a plain text document. - # [START migration_analyze_entities] + # [START language_python_migration_entities_text] document = types.Document( content=text, type=enums.Document.Type.PLAIN_TEXT) @@ -107,11 +107,11 @@ def entities_text(text): print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) - # [END migration_analyze_entities] -# [END def_entities_text] + # [END language_python_migration_entities_text] +# [END language_entities_text] -# [START def_entities_file] +# [START language_entities_gcs] def entities_file(gcs_uri): """Detects entities in the file located in Google Cloud Storage.""" client = language.LanguageServiceClient() @@ -137,10 +137,10 @@ def entities_file(gcs_uri): print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) -# [END def_entities_file] +# [END language_entities_gcs] -# [START def_syntax_text] +# [START language_syntax_text] def syntax_text(text): """Detects syntax in the text.""" client = language.LanguageServiceClient() @@ -149,7 +149,7 @@ def syntax_text(text): text = text.decode('utf-8') # Instantiates a plain text document. 
- # [START migration_analyze_syntax] + # [START language_python_migration_syntax_text] document = types.Document( content=text, type=enums.Document.Type.PLAIN_TEXT) @@ -165,11 +165,11 @@ def syntax_text(text): for token in tokens: print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], token.text.content)) - # [END migration_analyze_syntax] -# [END def_syntax_text] + # [END language_python_migration_syntax_text] +# [END language_syntax_text] -# [START def_syntax_file] +# [START language_syntax_gcs] def syntax_file(gcs_uri): """Detects syntax in the file located in Google Cloud Storage.""" client = language.LanguageServiceClient() @@ -190,10 +190,10 @@ def syntax_file(gcs_uri): for token in tokens: print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], token.text.content)) -# [END def_syntax_file] +# [END language_syntax_gcs] -# [START def_entity_sentiment_text] +# [START language_entity_sentiment_text] def entity_sentiment_text(text): """Detects entity sentiment in the provided text.""" client = language.LanguageServiceClient() @@ -223,9 +223,10 @@ def entity_sentiment_text(text): print(u' Type : {}'.format(mention.type)) print(u'Salience: {}'.format(entity.salience)) print(u'Sentiment: {}\n'.format(entity.sentiment)) -# [END def_entity_sentiment_text] +# [END language_entity_sentiment_text] +# [START language_entity_sentiment_gcs] def entity_sentiment_file(gcs_uri): """Detects entity sentiment in a Google Cloud Storage file.""" client = language.LanguageServiceClient() @@ -251,9 +252,10 @@ def entity_sentiment_file(gcs_uri): print(u' Type : {}'.format(mention.type)) print(u'Salience: {}'.format(entity.salience)) print(u'Sentiment: {}\n'.format(entity.sentiment)) +# [END language_entity_sentiment_gcs] -# [START def_classify_text] +# [START language_classify_text] def classify_text(text): """Classifies content categories of the provided text.""" client = language.LanguageServiceClient() @@ -271,10 +273,10 @@ def classify_text(text): print(u'=' * 20) print(u'{:<16}: {}'.format('name', category.name)) print(u'{:<16}: {}'.format('confidence', category.confidence)) -# [END def_classify_text] +# [END language_classify_text] -# [START def_classify_file] +# [START language_classify_gcs] def classify_file(gcs_uri): """Classifies content categories of the text in a Google Cloud Storage file. @@ -291,7 +293,7 @@ def classify_file(gcs_uri): print(u'=' * 20) print(u'{:<16}: {}'.format('name', category.name)) print(u'{:<16}: {}'.format('confidence', category.confidence)) -# [END def_classify_file] +# [END language_classify_gcs] if __name__ == '__main__': diff --git a/language/snippets/sentiment/sentiment_analysis.py b/language/snippets/sentiment/sentiment_analysis.py index 8ac8575b08ec..3b572bc2c94d 100644 --- a/language/snippets/sentiment/sentiment_analysis.py +++ b/language/snippets/sentiment/sentiment_analysis.py @@ -11,19 +11,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# [START sentiment_tutorial] +# [START language_sentiment_tutorial] """Demonstrates how to make a simple call to the Natural Language API.""" -# [START sentiment_tutorial_import] +# [START language_sentiment_tutorial_imports] import argparse from google.cloud import language from google.cloud.language import enums from google.cloud.language import types -# [END sentiment_tutorial_import] +# [END language_sentiment_tutorial_imports] -# [START def_print_result] +# [START language_sentiment_tutorial_print_result] def print_result(annotations): score = annotations.document_sentiment.score magnitude = annotations.document_sentiment.magnitude @@ -36,10 +36,10 @@ def print_result(annotations): print('Overall Sentiment: score of {} with magnitude of {}'.format( score, magnitude)) return 0 -# [END def_print_result] +# [END language_sentiment_tutorial_print_result] -# [START def_analyze] +# [START language_sentiment_tutorial_analyze_sentiment] def analyze(movie_review_filename): """Run a sentiment analysis request on text within a passed filename.""" client = language.LanguageServiceClient() @@ -55,9 +55,10 @@ def analyze(movie_review_filename): # Print the results print_result(annotations) -# [END def_analyze] +# [END language_sentiment_tutorial_analyze_sentiment] +# [START language_sentiment_tutorial_run_application] if __name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__, @@ -68,4 +69,5 @@ def analyze(movie_review_filename): args = parser.parse_args() analyze(args.movie_review_filename) -# [END sentiment_tutorial] +# [END language_sentiment_tutorial_run_application] +# [END language_sentiment_tutorial] From 9f72967137cd711888f195f9653c549e833c682d Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 28 Aug 2018 11:17:45 -0700 Subject: [PATCH 122/323] Auto-update dependencies. [(#1658)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1658) * Auto-update dependencies. * Rollback appengine/standard/bigquery/. * Rollback appengine/standard/iap/. * Rollback bigtable/metricscaler. * Rolledback appengine/flexible/datastore. * Rollback dataproc/ * Rollback jobs/api_client * Rollback vision/cloud-client. * Rollback functions/ocr/app. * Rollback iot/api-client/end_to_end_example. * Rollback storage/cloud-client. * Rollback kms/api-client. * Rollback dlp/ * Rollback bigquery/cloud-client. * Rollback iot/api-client/manager. * Rollback appengine/flexible/cloudsql_postgresql. 
--- language/snippets/api/requirements.txt | 4 ++-- language/snippets/automl/requirements.txt | 2 +- language/snippets/classify_text/requirements.txt | 4 ++-- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/cloud-client/v1beta2/requirements.txt | 2 +- language/snippets/movie_nl/requirements.txt | 6 +++--- language/snippets/ocr_nl/requirements.txt | 4 ++-- language/snippets/sentiment/requirements.txt | 2 +- language/snippets/syntax_triples/requirements.txt | 4 ++-- language/snippets/tutorial/requirements.txt | 4 ++-- 10 files changed, 17 insertions(+), 17 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index e5f3a6c5cd6b..5e9029185cdc 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.6 -google-auth==1.4.1 +google-api-python-client==1.7.4 +google-auth==1.5.1 google-auth-httplib2==0.0.3 diff --git a/language/snippets/automl/requirements.txt b/language/snippets/automl/requirements.txt index d1bff72ae9ad..9b692618364a 100644 --- a/language/snippets/automl/requirements.txt +++ b/language/snippets/automl/requirements.txt @@ -1 +1 @@ -google-cloud-automl==0.1.0 +google-cloud-automl==0.1.1 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 5b7339a04f37..d045e22d00e3 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==1.0.1 -numpy==1.14.2 +google-cloud-language==1.0.2 +numpy==1.15.1 diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 5085e2cd98ea..2cbc37eb15b1 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.1 +google-cloud-language==1.0.2 diff --git a/language/snippets/cloud-client/v1beta2/requirements.txt b/language/snippets/cloud-client/v1beta2/requirements.txt index 5085e2cd98ea..2cbc37eb15b1 100644 --- a/language/snippets/cloud-client/v1beta2/requirements.txt +++ b/language/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.1 +google-cloud-language==1.0.2 diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt index cbe4d142a056..9718b185a6de 100644 --- a/language/snippets/movie_nl/requirements.txt +++ b/language/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ -google-api-python-client==1.6.6 -google-auth==1.4.1 +google-api-python-client==1.7.4 +google-auth==1.5.1 google-auth-httplib2==0.0.3 -requests==2.18.4 +requests==2.19.1 diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt index e5f3a6c5cd6b..5e9029185cdc 100644 --- a/language/snippets/ocr_nl/requirements.txt +++ b/language/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.6 -google-auth==1.4.1 +google-api-python-client==1.7.4 +google-auth==1.5.1 google-auth-httplib2==0.0.3 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 5085e2cd98ea..2cbc37eb15b1 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.1 +google-cloud-language==1.0.2 diff --git a/language/snippets/syntax_triples/requirements.txt 
b/language/snippets/syntax_triples/requirements.txt index e5f3a6c5cd6b..5e9029185cdc 100644 --- a/language/snippets/syntax_triples/requirements.txt +++ b/language/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.6 -google-auth==1.4.1 +google-api-python-client==1.7.4 +google-auth==1.5.1 google-auth-httplib2==0.0.3 diff --git a/language/snippets/tutorial/requirements.txt b/language/snippets/tutorial/requirements.txt index e5f3a6c5cd6b..5e9029185cdc 100644 --- a/language/snippets/tutorial/requirements.txt +++ b/language/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.6 -google-auth==1.4.1 +google-api-python-client==1.7.4 +google-auth==1.5.1 google-auth-httplib2==0.0.3 From 9fed94d7d13966f195437ea2e3888eed7c478c2d Mon Sep 17 00:00:00 2001 From: Alix Hamilton Date: Wed, 29 Aug 2018 12:37:06 -0700 Subject: [PATCH 123/323] Update AutoML region tags to use standard product prefixes [(#1669)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1669) --- .../automl/automl_natural_language_dataset.py | 24 +++++++------- .../automl/automl_natural_language_model.py | 32 +++++++++---------- .../automl/automl_natural_language_predict.py | 4 +-- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/language/snippets/automl/automl_natural_language_dataset.py b/language/snippets/automl/automl_natural_language_dataset.py index 7793d4a60e55..df77d54268d4 100755 --- a/language/snippets/automl/automl_natural_language_dataset.py +++ b/language/snippets/automl/automl_natural_language_dataset.py @@ -27,7 +27,7 @@ def create_dataset(project_id, compute_region, dataset_name, multilabel=False): """Create a dataset.""" - # [START automl_natural_language_create_dataset] + # [START automl_language_create_dataset] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -69,12 +69,12 @@ def create_dataset(project_id, compute_region, dataset_name, multilabel=False): print("\tseconds: {}".format(dataset.create_time.seconds)) print("\tnanos: {}".format(dataset.create_time.nanos)) - # [END automl_natural_language_create_dataset] + # [END automl_language_create_dataset] def list_datasets(project_id, compute_region, filter_): """List all datasets.""" - # [START automl_natural_language_list_datasets] + # [START automl_language_list_datasets] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -103,12 +103,12 @@ def list_datasets(project_id, compute_region, filter_): print("\tseconds: {}".format(dataset.create_time.seconds)) print("\tnanos: {}".format(dataset.create_time.nanos)) - # [END automl_natural_language_list_datasets] + # [END automl_language_list_datasets] def get_dataset(project_id, compute_region, dataset_id): """Get the dataset.""" - # [START automl_natural_language_get_dataset] + # [START automl_language_get_dataset] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -137,12 +137,12 @@ def get_dataset(project_id, compute_region, dataset_id): print("\tseconds: {}".format(dataset.create_time.seconds)) print("\tnanos: {}".format(dataset.create_time.nanos)) - # [END automl_natural_language_get_dataset] + # [END automl_language_get_dataset] def import_data(project_id, compute_region, dataset_id, path): """Import labelled items.""" - # [START 
automl_natural_language_import_data] + # [START automl_language_import_data] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -169,12 +169,12 @@ def import_data(project_id, compute_region, dataset_id, path): # synchronous check of operation status. print("Data imported. {}".format(response.result())) - # [END automl_natural_language_import_data] + # [END automl_language_import_data] def export_data(project_id, compute_region, dataset_id, output_uri): """Export a dataset to a Google Cloud Storage bucket.""" - # [START automl_natural_language_export_data] + # [START automl_language_export_data] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -200,12 +200,12 @@ def export_data(project_id, compute_region, dataset_id, output_uri): # synchronous check of operation status. print("Data exported. {}".format(response.result())) - # [END automl_natural_language_export_data] + # [END automl_language_export_data] def delete_dataset(project_id, compute_region, dataset_id): """Delete a dataset.""" - # [START automl_natural_language_delete_dataset] + # [START automl_language_delete_dataset] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -226,7 +226,7 @@ def delete_dataset(project_id, compute_region, dataset_id): # synchronous check of operation status. print("Dataset deleted. {}".format(response.result())) - # [END automl_natural_language_delete_dataset] + # [END automl_language_delete_dataset] if __name__ == "__main__": diff --git a/language/snippets/automl/automl_natural_language_model.py b/language/snippets/automl/automl_natural_language_model.py index 84c0d99e4017..354721213da5 100755 --- a/language/snippets/automl/automl_natural_language_model.py +++ b/language/snippets/automl/automl_natural_language_model.py @@ -27,7 +27,7 @@ def create_model(project_id, compute_region, dataset_id, model_name): """Create a model.""" - # [START automl_natural_language_create_model] + # [START automl_language_create_model] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -53,12 +53,12 @@ def create_model(project_id, compute_region, dataset_id, model_name): print("Training operation name: {}".format(response.operation.name)) print("Training started...") - # [END automl_natural_language_create_model] + # [END automl_language_create_model] def get_operation_status(operation_full_id): """Get operation status.""" - # [START automl_natural_language_get_operation_status] + # [START automl_language_get_operation_status] # TODO(developer): Uncomment and set the following variables # operation_full_id = # 'projects//locations//operations/' @@ -74,12 +74,12 @@ def get_operation_status(operation_full_id): print("Operation status: {}".format(response)) - # [END automl_natural_language_get_operation_status] + # [END automl_language_get_operation_status] def list_models(project_id, compute_region, filter_): """List all models.""" - # [START automl_natural_language_list_models] + # [START automl_language_list_models] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -114,12 +114,12 @@ def list_models(project_id, compute_region, filter_): print("\tnanos: {}".format(model.create_time.nanos)) 
print("Model deployment state: {}".format(deployment_state)) - # [END automl_natural_language_list_models] + # [END automl_language_list_models] def get_model(project_id, compute_region, model_id): """Get model details.""" - # [START automl_natural_language_get_model] + # [START automl_language_get_model] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -152,12 +152,12 @@ def get_model(project_id, compute_region, model_id): print("\tnanos: {}".format(model.create_time.nanos)) print("Model deployment state: {}".format(deployment_state)) - # [END automl_natural_language_get_model] + # [END automl_language_get_model] def list_model_evaluations(project_id, compute_region, model_id, filter_): """List model evaluations.""" - # [START automl_natural_language_list_model_evaluations] + # [START automl_language_list_model_evaluations] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -178,14 +178,14 @@ def list_model_evaluations(project_id, compute_region, model_id, filter_): for element in response: print(element) - # [END automl_natural_language_list_model_evaluations] + # [END automl_language_list_model_evaluations] def get_model_evaluation( project_id, compute_region, model_id, model_evaluation_id ): """Get model evaluation.""" - # [START automl_natural_language_get_model_evaluation] + # [START automl_language_get_model_evaluation] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -206,12 +206,12 @@ def get_model_evaluation( print(response) - # [END automl_natural_language_get_model_evaluation] + # [END automl_language_get_model_evaluation] def display_evaluation(project_id, compute_region, model_id, filter_): """Display evaluation.""" - # [START automl_natural_language_display_evaluation] + # [START automl_language_display_evaluation] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -281,12 +281,12 @@ def display_evaluation(project_id, compute_region, model_id, filter_): ) ) - # [END automl_natural_language_display_evaluation] + # [END automl_language_display_evaluation] def delete_model(project_id, compute_region, model_id): """Delete a model.""" - # [START automl_natural_language_delete_model] + # [START automl_language_delete_model] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -305,7 +305,7 @@ def delete_model(project_id, compute_region, model_id): # synchronous check of operation status. print("Model deleted. 
{}".format(response.result())) - # [END automl_natural_language_delete_model] + # [END automl_language_delete_model] if __name__ == "__main__": diff --git a/language/snippets/automl/automl_natural_language_predict.py b/language/snippets/automl/automl_natural_language_predict.py index 0c25e373467b..b328c7aeb63b 100755 --- a/language/snippets/automl/automl_natural_language_predict.py +++ b/language/snippets/automl/automl_natural_language_predict.py @@ -27,7 +27,7 @@ def predict(project_id, compute_region, model_id, file_path): """Classify the content.""" - # [START automl_natural_language_predict] + # [START automl_language_predict] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -62,7 +62,7 @@ def predict(project_id, compute_region, model_id, file_path): print("Predicted class name: {}".format(result.display_name)) print("Predicted class score: {}".format(result.classification.score)) - # [END automl_natural_language_predict] + # [END automl_language_predict] if __name__ == "__main__": From fca1973e815d6c6cbeec8fc8e2d1d7bc9ed75dc9 Mon Sep 17 00:00:00 2001 From: Rebecca Taylor Date: Thu, 30 Aug 2018 16:01:18 -0700 Subject: [PATCH 124/323] Add small, generated version of `language_sentiment_text` [(#1660)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1660) * Generated sample: language_sentiment_text FYI generated from the following YAML GAPIC config: sample_value_sets: - id: analyze_sentiment title: "Analyzing Sentiment" description: "Proof of concept for analyzing sentiment" parameters: defaults: - document.type=PLAIN_TEXT - document.content="Your text to analyze, e.g. Hello, world!" attributes: - parameter: document.content sample_argument: true on_success: - define: sentiment=$resp.document_sentiment - print: - "Score: %s" - sentiment.score - print: - "Magnitude: %s" - sentiment.magnitude samples: standalone: - calling_forms: ".*" value_sets: analyze_sentiment region_tag: language_sentiment_text * Add requirements.txt (not currently generated) * Add test for language_sentiment_text (not currently generated) * Move language_python_migration_document_text Move language_python_migration_document_text so it uses a different snippet in preparation for deprecation of existing language_sentiment_text sample * Rename generated snippets so filename == region tag * Fix test for generated code sample (file rename to match region tag) * Update Copyright year to 2018 in new hand-written file * Fix lint errors of #language_sentiment_text test * Regenerate #language_sentiment_text to fix lint errors (updated Python sample template) * Binary string support in samples! 
From PR https://github.com/googleapis/gapic-generator/pull/2272
---
 language/snippets/cloud-client/v1/snippets.py | 4 +-
 .../v1/language_sentiment_text.py | 61 +++++++++++++++++++
 .../v1/language_sentiment_text_test.py | 28 +++++++++
 .../generated-samples/v1/requirements.txt | 1 +
 4 files changed, 92 insertions(+), 2 deletions(-)
 create mode 100644 language/snippets/generated-samples/v1/language_sentiment_text.py
 create mode 100644 language/snippets/generated-samples/v1/language_sentiment_text_test.py
 create mode 100644 language/snippets/generated-samples/v1/requirements.txt

diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py
index 3b1c02f9c68f..a41c7cb3ccc6 100644
--- a/language/snippets/cloud-client/v1/snippets.py
+++ b/language/snippets/cloud-client/v1/snippets.py
@@ -39,12 +39,10 @@ def sentiment_text(text):
         text = text.decode('utf-8')
 
     # Instantiates a plain text document.
-    # [START language_python_migration_document_text]
     # [START language_python_migration_sentiment_text]
     document = types.Document(
         content=text,
         type=enums.Document.Type.PLAIN_TEXT)
-    # [END language_python_migration_document_text]
 
     # Detects sentiment in the document. You can also analyze HTML with:
     #   document.type == enums.Document.Type.HTML
@@ -87,9 +85,11 @@ def entities_text(text):
 
     # Instantiates a plain text document.
     # [START language_python_migration_entities_text]
+    # [START language_python_migration_document_text]
     document = types.Document(
         content=text,
         type=enums.Document.Type.PLAIN_TEXT)
+    # [END language_python_migration_document_text]
 
     # Detects entities in the document. You can also analyze HTML with:
     #   document.type == enums.Document.Type.HTML
diff --git a/language/snippets/generated-samples/v1/language_sentiment_text.py b/language/snippets/generated-samples/v1/language_sentiment_text.py
new file mode 100644
index 000000000000..d99f5d09c3a6
--- /dev/null
+++ b/language/snippets/generated-samples/v1/language_sentiment_text.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# DO NOT EDIT! This is a generated sample ("Request", "analyze_sentiment")
+
+# To install the latest published package dependency, execute the following:
+# pip install google-cloud-language
+
+import sys
+
+# [START language_sentiment_text]
+
+from google.cloud import language_v1
+from google.cloud.language_v1 import enums
+import six
+
+
+def sample_analyze_sentiment(content):
+    # [START language_sentiment_text_core]
+
+    client = language_v1.LanguageServiceClient()
+
+    # content = 'Your text to analyze, e.g. Hello, world!'
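+
+    # Accept both binary and text input: bytes are decoded to str before
+    # the request is built (the generator's new binary string support).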
+    if isinstance(content, six.binary_type):
+        content = content.decode('utf-8')
+
+    type_ = enums.Document.Type.PLAIN_TEXT
+    document = {'type': type_, 'content': content}
+
+    response = client.analyze_sentiment(document)
+    sentiment = response.document_sentiment
+    print('Score: {}'.format(sentiment.score))
+    print('Magnitude: {}'.format(sentiment.magnitude))
+
+    # [END language_sentiment_text_core]
+
+
+# [END language_sentiment_text]
+
+
+def main():
+    # FIXME: Convert argv from strings to the correct types.
+    sample_analyze_sentiment(*sys.argv[1:])
+
+
+if __name__ == '__main__':
+    main()
diff --git a/language/snippets/generated-samples/v1/language_sentiment_text_test.py b/language/snippets/generated-samples/v1/language_sentiment_text_test.py
new file mode 100644
index 000000000000..e1876da27525
--- /dev/null
+++ b/language/snippets/generated-samples/v1/language_sentiment_text_test.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import language_sentiment_text
+
+
+def test_analyze_sentiment_text_positive(capsys):
+    language_sentiment_text.sample_analyze_sentiment('Happy Happy Joy Joy')
+    out, _ = capsys.readouterr()
+    assert 'Score: 0.' in out
+
+
+def test_analyze_sentiment_text_negative(capsys):
+    language_sentiment_text.sample_analyze_sentiment('Angry Angry Sad Sad')
+    out, _ = capsys.readouterr()
+    assert 'Score: -0.' in out
diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt
new file mode 100644
index 000000000000..2cbc37eb15b1
--- /dev/null
+++ b/language/snippets/generated-samples/v1/requirements.txt
@@ -0,0 +1 @@
+google-cloud-language==1.0.2

From 7a994451528953cf94c8bd7922917b4c2879f02b Mon Sep 17 00:00:00 2001
From: Rebecca Taylor
Date: Mon, 15 Oct 2018 13:53:04 -0700
Subject: [PATCH 125/323] Access Display Names of enum fields via enum object [(#1738)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1738)

* Get display name of enums using IntEnum

Requires updating google-cloud-language to 1.1.0

* Add note about gs://demomaker for video test files

* Get display name of enums using IntEnum

* Get display name of enums using IntEnum

* Revert "Add note about gs://demomaker for video test files"

This reverts commit 39d9bfff03201f7c6dcb38fee3856dd537ab4b62.
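
The change, in isolation: per the message above, client library 1.1.0 exposes the proto enums as IntEnum-style classes, so a raw integer from a response can be wrapped to recover its display name. A minimal standalone sketch (the hard-coded value stands in for a real API response):

```python
from google.cloud.language import enums

# Enum values arrive in API responses as plain integers; 1 is Entity.Type.PERSON.
raw_type = 1

# Wrapping the integer in the generated enum class replaces the old
# hand-maintained tuple lookup (e.g. entity_type[entity.type]) seen below.
print(enums.Entity.Type(raw_type).name)  # PERSON
```
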
--- .../snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/cloud-client/v1/snippets.py | 28 ++++++------------- 2 files changed, 9 insertions(+), 21 deletions(-) diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 2cbc37eb15b1..7029093e9515 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.2 +google-cloud-language==1.1.0 diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py index a41c7cb3ccc6..826c28c54f1a 100644 --- a/language/snippets/cloud-client/v1/snippets.py +++ b/language/snippets/cloud-client/v1/snippets.py @@ -95,14 +95,11 @@ def entities_text(text): # document.type == enums.Document.Type.HTML entities = client.analyze_entities(document).entities - # entity types from enums.Entity.Type - entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', - 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') - for entity in entities: + entity_type = enums.Entity.Type(entity.type) print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity_type[entity.type])) + print(u'{:<16}: {}'.format('type', entity_type.name)) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', @@ -125,14 +122,11 @@ def entities_file(gcs_uri): # document.type == enums.Document.Type.HTML entities = client.analyze_entities(document).entities - # entity types from enums.Entity.Type - entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', - 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') - for entity in entities: + entity_type = enums.Entity.Type(entity.type) print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity_type[entity.type])) + print(u'{:<16}: {}'.format('type', entity_type.name)) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', @@ -158,12 +152,9 @@ def syntax_text(text): # document.type == enums.Document.Type.HTML tokens = client.analyze_syntax(document).tokens - # part-of-speech tags from enums.PartOfSpeech.Tag - pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', - 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') - for token in tokens: - print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], + part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag) + print(u'{}: {}'.format(part_of_speech_tag.name, token.text.content)) # [END language_python_migration_syntax_text] # [END language_syntax_text] @@ -183,12 +174,9 @@ def syntax_file(gcs_uri): # document.type == enums.Document.Type.HTML tokens = client.analyze_syntax(document).tokens - # part-of-speech tags from enums.PartOfSpeech.Tag - pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', - 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') - for token in tokens: - print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], + part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag) + print(u'{}: {}'.format(part_of_speech_tag.name, token.text.content)) # [END language_syntax_gcs] From dac091d26a06eb92247dd51e77af44fc1fe59f88 Mon Sep 17 00:00:00 2001 From: Alix Hamilton Date: Tue, 16 Oct 2018 15:50:57 -0700 Subject: [PATCH 126/323] Remove 
unused and outdated Natural Language samples [(#1715)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1715) * remove unused beta entity sentiment samples * remove unused beta samples * remove v1beta2 directory * remove outdated unused tutorial * removes remaining googleapiclient.discovery tutorials --- language/snippets/README.md | 11 - .../snippets/cloud-client/v1beta2/README.rst | 151 -------- .../cloud-client/v1beta2/README.rst.in | 32 -- .../cloud-client/v1beta2/quickstart.py | 43 --- .../cloud-client/v1beta2/quickstart_test.py | 22 -- .../cloud-client/v1beta2/requirements.txt | 1 - .../v1beta2/resources/android_text.txt | 1 - .../cloud-client/v1beta2/resources/text.txt | 1 - .../snippets/cloud-client/v1beta2/snippets.py | 346 ----------------- .../cloud-client/v1beta2/snippets_test.py | 106 ------ language/snippets/movie_nl/README.md | 157 -------- language/snippets/movie_nl/main.py | 334 ----------------- language/snippets/movie_nl/main_test.py | 130 ------- language/snippets/movie_nl/requirements.txt | 4 - language/snippets/ocr_nl/README.md | 232 ------------ language/snippets/ocr_nl/main.py | 354 ------------------ language/snippets/ocr_nl/main_test.py | 100 ----- language/snippets/ocr_nl/requirements.txt | 3 - language/snippets/syntax_triples/README.md | 96 ----- language/snippets/syntax_triples/main.py | 172 --------- language/snippets/syntax_triples/main_test.py | 53 --- .../snippets/syntax_triples/requirements.txt | 3 - .../resources/obama_wikipedia.txt | 1 - language/snippets/tutorial/README.rst | 93 ----- language/snippets/tutorial/README.rst.in | 22 -- language/snippets/tutorial/requirements.txt | 3 - .../tutorial/reviews/bladerunner-mixed.txt | 19 - .../tutorial/reviews/bladerunner-neg.txt | 3 - .../tutorial/reviews/bladerunner-neutral.txt | 2 - .../tutorial/reviews/bladerunner-pos.txt | 10 - language/snippets/tutorial/tutorial.py | 69 ---- language/snippets/tutorial/tutorial_test.py | 51 --- 32 files changed, 2625 deletions(-) delete mode 100644 language/snippets/cloud-client/v1beta2/README.rst delete mode 100644 language/snippets/cloud-client/v1beta2/README.rst.in delete mode 100644 language/snippets/cloud-client/v1beta2/quickstart.py delete mode 100644 language/snippets/cloud-client/v1beta2/quickstart_test.py delete mode 100644 language/snippets/cloud-client/v1beta2/requirements.txt delete mode 100644 language/snippets/cloud-client/v1beta2/resources/android_text.txt delete mode 100644 language/snippets/cloud-client/v1beta2/resources/text.txt delete mode 100644 language/snippets/cloud-client/v1beta2/snippets.py delete mode 100644 language/snippets/cloud-client/v1beta2/snippets_test.py delete mode 100644 language/snippets/movie_nl/README.md delete mode 100644 language/snippets/movie_nl/main.py delete mode 100644 language/snippets/movie_nl/main_test.py delete mode 100644 language/snippets/movie_nl/requirements.txt delete mode 100644 language/snippets/ocr_nl/README.md delete mode 100755 language/snippets/ocr_nl/main.py delete mode 100755 language/snippets/ocr_nl/main_test.py delete mode 100644 language/snippets/ocr_nl/requirements.txt delete mode 100644 language/snippets/syntax_triples/README.md delete mode 100644 language/snippets/syntax_triples/main.py delete mode 100755 language/snippets/syntax_triples/main_test.py delete mode 100644 language/snippets/syntax_triples/requirements.txt delete mode 100644 language/snippets/syntax_triples/resources/obama_wikipedia.txt delete mode 100644 language/snippets/tutorial/README.rst delete mode 100644 
language/snippets/tutorial/README.rst.in delete mode 100644 language/snippets/tutorial/requirements.txt delete mode 100644 language/snippets/tutorial/reviews/bladerunner-mixed.txt delete mode 100644 language/snippets/tutorial/reviews/bladerunner-neg.txt delete mode 100644 language/snippets/tutorial/reviews/bladerunner-neutral.txt delete mode 100644 language/snippets/tutorial/reviews/bladerunner-pos.txt delete mode 100644 language/snippets/tutorial/tutorial.py delete mode 100644 language/snippets/tutorial/tutorial_test.py diff --git a/language/snippets/README.md b/language/snippets/README.md index d0ba56915559..5689d7c21ab3 100644 --- a/language/snippets/README.md +++ b/language/snippets/README.md @@ -10,17 +10,6 @@ This directory contains Python examples that use the - [api](api) has a simple command line tool that shows off the API's features. -- [movie_nl](movie_nl) combines sentiment and entity analysis to come up with -actors/directors who are the most and least popular in the imdb movie reviews. - -- [ocr_nl](ocr_nl) uses the [Cloud Vision API](https://cloud.google.com/vision/) -to extract text from images, then uses the NL API to extract entity information -from those texts, and stores the extracted information in a database in support -of further analysis and correlation. - - [sentiment](sentiment) contains the [Sentiment Analysis Tutorial](https://cloud.google.com/natural-language/docs/sentiment-tutorial) code as used within the documentation. - -- [syntax_triples](syntax_triples) uses syntax analysis to find -subject-verb-object triples in a given piece of text. diff --git a/language/snippets/cloud-client/v1beta2/README.rst b/language/snippets/cloud-client/v1beta2/README.rst deleted file mode 100644 index 03400319bebc..000000000000 --- a/language/snippets/cloud-client/v1beta2/README.rst +++ /dev/null @@ -1,151 +0,0 @@ -.. This file is automatically generated. Do not edit this file directly. - -Google Cloud Natural Language API Python Samples -=============================================================================== - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/README.rst - - -This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. - -- See the `migration guide`_ for information about migrating to Python client library v0.26.1. - -.. _migration guide: https://cloud.google.com/natural-language/docs/python-client-migration - - - - -.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ - -Setup -------------------------------------------------------------------------------- - - -Authentication -++++++++++++++ - -This sample requires you to have authentication setup. Refer to the -`Authentication Getting Started Guide`_ for instructions on setting up -credentials for applications. - -.. _Authentication Getting Started Guide: - https://cloud.google.com/docs/authentication/getting-started - -Install Dependencies -++++++++++++++++++++ - -#. Clone python-docs-samples and change directory to the sample directory you want to use. - - .. 
code-block:: bash - - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git - -#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup - -#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. - - .. code-block:: bash - - $ virtualenv env - $ source env/bin/activate - -#. Install the dependencies needed to run the samples. - - .. code-block:: bash - - $ pip install -r requirements.txt - -.. _pip: https://pip.pypa.io/ -.. _virtualenv: https://virtualenv.pypa.io/ - -Samples -------------------------------------------------------------------------------- - -Quickstart -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/quickstart.py,language/cloud-client/v1beta2/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python quickstart.py - - -Snippets -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/snippets.py,language/cloud-client/v1beta2/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python snippets.py - - usage: snippets.py [-h] - {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} - ... - - This application demonstrates how to perform basic operations with the - Google Cloud Natural Language API - - For more information, the documentation at - https://cloud.google.com/natural-language/docs. - - positional arguments: - {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} - classify-text Classifies content categories of the provided text. - classify-file Classifies content categories of the text in a Google - Cloud Storage file. - sentiment-entities-text - Detects entity sentiment in the provided text. - sentiment-entities-file - Detects entity sentiment in a Google Cloud Storage - file. - sentiment-text Detects sentiment in the text. - sentiment-file Detects sentiment in the file located in Google Cloud - Storage. - entities-text Detects entities in the text. - entities-file Detects entities in the file located in Google Cloud - Storage. - syntax-text Detects syntax in the text. - syntax-file Detects syntax in the file located in Google Cloud - Storage. - - optional arguments: - -h, --help show this help message and exit - - - - - -The client library -------------------------------------------------------------------------------- - -This sample uses the `Google Cloud Client Library for Python`_. -You can read the documentation for more details on API usage and use GitHub -to `browse the source`_ and `report issues`_. - -.. 
_Google Cloud Client Library for Python: - https://googlecloudplatform.github.io/google-cloud-python/ -.. _browse the source: - https://github.com/GoogleCloudPlatform/google-cloud-python -.. _report issues: - https://github.com/GoogleCloudPlatform/google-cloud-python/issues - - -.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/language/snippets/cloud-client/v1beta2/README.rst.in b/language/snippets/cloud-client/v1beta2/README.rst.in deleted file mode 100644 index d11667458a5a..000000000000 --- a/language/snippets/cloud-client/v1beta2/README.rst.in +++ /dev/null @@ -1,32 +0,0 @@ -# This file is used to generate README.rst - -product: - name: Google Cloud Natural Language API - short_name: Cloud Natural Language API - url: https://cloud.google.com/natural-language/docs/ - description: > - The `Google Cloud Natural Language API`_ provides natural language - understanding technologies to developers, including sentiment analysis, - entity recognition, and syntax analysis. This API is part of the larger - Cloud Machine Learning API. - - - - See the `migration guide`_ for information about migrating to Python client library v0.26.1. - - - .. _migration guide: https://cloud.google.com/natural-language/docs/python-client-migration - -setup: -- auth -- install_deps - -samples: -- name: Quickstart - file: quickstart.py -- name: Snippets - file: snippets.py - show_help: true - -cloud_client_library: true - -folder: language/cloud-client/v1beta2 \ No newline at end of file diff --git a/language/snippets/cloud-client/v1beta2/quickstart.py b/language/snippets/cloud-client/v1beta2/quickstart.py deleted file mode 100644 index b19d11b7bc17..000000000000 --- a/language/snippets/cloud-client/v1beta2/quickstart.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def run_quickstart(): - # [START language_quickstart] - # Imports the Google Cloud client library - from google.cloud import language_v1beta2 - from google.cloud.language_v1beta2 import enums - from google.cloud.language_v1beta2 import types - - # Instantiates a client with the v1beta2 version - client = language_v1beta2.LanguageServiceClient() - - # The text to analyze - text = u'Hallo Welt!' - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT, - language='de') - # Detects the sentiment of the text - sentiment = client.analyze_sentiment(document).document_sentiment - - print('Text: {}'.format(text)) - print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) - # [END language_quickstart] - - -if __name__ == '__main__': - run_quickstart() diff --git a/language/snippets/cloud-client/v1beta2/quickstart_test.py b/language/snippets/cloud-client/v1beta2/quickstart_test.py deleted file mode 100644 index 839faae2a00d..000000000000 --- a/language/snippets/cloud-client/v1beta2/quickstart_test.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2017 Google Inc. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import quickstart - - -def test_quickstart(capsys): - quickstart.run_quickstart() - out, _ = capsys.readouterr() - assert 'Sentiment' in out diff --git a/language/snippets/cloud-client/v1beta2/requirements.txt b/language/snippets/cloud-client/v1beta2/requirements.txt deleted file mode 100644 index 2cbc37eb15b1..000000000000 --- a/language/snippets/cloud-client/v1beta2/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -google-cloud-language==1.0.2 diff --git a/language/snippets/cloud-client/v1beta2/resources/android_text.txt b/language/snippets/cloud-client/v1beta2/resources/android_text.txt deleted file mode 100644 index c05c452dc008..000000000000 --- a/language/snippets/cloud-client/v1beta2/resources/android_text.txt +++ /dev/null @@ -1 +0,0 @@ -Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets. diff --git a/language/snippets/cloud-client/v1beta2/resources/text.txt b/language/snippets/cloud-client/v1beta2/resources/text.txt deleted file mode 100644 index 97a1cea02b7a..000000000000 --- a/language/snippets/cloud-client/v1beta2/resources/text.txt +++ /dev/null @@ -1 +0,0 @@ -President Obama is speaking at the White House. \ No newline at end of file diff --git a/language/snippets/cloud-client/v1beta2/snippets.py b/language/snippets/cloud-client/v1beta2/snippets.py deleted file mode 100644 index abf16ada560d..000000000000 --- a/language/snippets/cloud-client/v1beta2/snippets.py +++ /dev/null @@ -1,346 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2016 Google, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This application demonstrates how to perform basic operations with the -Google Cloud Natural Language API - -For more information, the documentation at -https://cloud.google.com/natural-language/docs. -""" - -import argparse -import sys - -# [START beta_import] -from google.cloud import language_v1beta2 -from google.cloud.language_v1beta2 import enums -from google.cloud.language_v1beta2 import types -# [END beta_import] -import six - - -def sentiment_text(text): - """Detects sentiment in the text.""" - client = language_v1beta2.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - # Instantiates a plain text document. - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects sentiment in the document. 
You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - sentiment = client.analyze_sentiment(document).document_sentiment - - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) - - -def sentiment_file(gcs_uri): - """Detects sentiment in the file located in Google Cloud Storage.""" - client = language_v1beta2.LanguageServiceClient() - - # Instantiates a plain text document. - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects sentiment in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - sentiment = client.analyze_sentiment(document).document_sentiment - - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) - - -def entities_text(text): - """Detects entities in the text.""" - client = language_v1beta2.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - # Instantiates a plain text document. - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects entities in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - entities = client.analyze_entities(document).entities - - # entity types from enums.Entity.Type - entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', - 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') - - for entity in entities: - print('=' * 20) - print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity_type[entity.type])) - print(u'{:<16}: {}'.format('metadata', entity.metadata)) - print(u'{:<16}: {}'.format('salience', entity.salience)) - print(u'{:<16}: {}'.format('wikipedia_url', - entity.metadata.get('wikipedia_url', '-'))) - - -def entities_file(gcs_uri): - """Detects entities in the file located in Google Cloud Storage.""" - client = language_v1beta2.LanguageServiceClient() - - # Instantiates a plain text document. - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects sentiment in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - entities = client.analyze_entities(document).entities - - # entity types from enums.Entity.Type - entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', - 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') - - for entity in entities: - print('=' * 20) - print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity_type[entity.type])) - print(u'{:<16}: {}'.format('metadata', entity.metadata)) - print(u'{:<16}: {}'.format('salience', entity.salience)) - print(u'{:<16}: {}'.format('wikipedia_url', - entity.metadata.get('wikipedia_url', '-'))) - - -# [START def_entity_sentiment_text] -def entity_sentiment_text(text): - """Detects entity sentiment in the provided text.""" - client = language_v1beta2.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - document = types.Document( - content=text.encode('utf-8'), - type=enums.Document.Type.PLAIN_TEXT) - - # Detect and send native Python encoding to receive correct word offsets. 
- encoding = enums.EncodingType.UTF32 - if sys.maxunicode == 65535: - encoding = enums.EncodingType.UTF16 - - result = client.analyze_entity_sentiment(document, encoding) - - for entity in result.entities: - print('Mentions: ') - print(u'Name: "{}"'.format(entity.name)) - for mention in entity.mentions: - print(u' Begin Offset : {}'.format(mention.text.begin_offset)) - print(u' Content : {}'.format(mention.text.content)) - print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(u' Sentiment : {}'.format(mention.sentiment.score)) - print(u' Type : {}'.format(mention.type)) - print(u'Salience: {}'.format(entity.salience)) - print(u'Sentiment: {}\n'.format(entity.sentiment)) -# [END def_entity_sentiment_text] - - -def entity_sentiment_file(gcs_uri): - """Detects entity sentiment in a Google Cloud Storage file.""" - client = language_v1beta2.LanguageServiceClient() - - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detect and send native Python encoding to receive correct word offsets. - encoding = enums.EncodingType.UTF32 - if sys.maxunicode == 65535: - encoding = enums.EncodingType.UTF16 - - result = client.analyze_entity_sentiment(document, encoding) - - for entity in result.entities: - print(u'Name: "{}"'.format(entity.name)) - for mention in entity.mentions: - print(u' Begin Offset : {}'.format(mention.text.begin_offset)) - print(u' Content : {}'.format(mention.text.content)) - print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(u' Sentiment : {}'.format(mention.sentiment.score)) - print(u' Type : {}'.format(mention.type)) - print(u'Salience: {}'.format(entity.salience)) - print(u'Sentiment: {}\n'.format(entity.sentiment)) - - -def syntax_text(text): - """Detects syntax in the text.""" - client = language_v1beta2.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - # Instantiates a plain text document. - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects syntax in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - tokens = client.analyze_syntax(document).tokens - - # part-of-speech tags from enums.PartOfSpeech.Tag - pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', - 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') - - for token in tokens: - print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], - token.text.content)) - - -def syntax_file(gcs_uri): - """Detects syntax in the file located in Google Cloud Storage.""" - client = language_v1beta2.LanguageServiceClient() - - # Instantiates a plain text document. - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects syntax in the document. 
You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - tokens = client.analyze_syntax(document).tokens - - # part-of-speech tags from enums.PartOfSpeech.Tag - pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', - 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') - - for token in tokens: - print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], - token.text.content)) - - -# [START def_classify_text] -def classify_text(text): - """Classifies content categories of the provided text.""" - # [START beta_client] - client = language_v1beta2.LanguageServiceClient() - # [END beta_client] - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - document = types.Document( - content=text.encode('utf-8'), - type=enums.Document.Type.PLAIN_TEXT) - - categories = client.classify_text(document).categories - - for category in categories: - print(u'=' * 20) - print(u'{:<16}: {}'.format('name', category.name)) - print(u'{:<16}: {}'.format('confidence', category.confidence)) -# [END def_classify_text] - - -# [START def_classify_file] -def classify_file(gcs_uri): - """Classifies content categories of the text in a Google Cloud Storage - file. - """ - client = language_v1beta2.LanguageServiceClient() - - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - categories = client.classify_text(document).categories - - for category in categories: - print(u'=' * 20) - print(u'{:<16}: {}'.format('name', category.name)) - print(u'{:<16}: {}'.format('confidence', category.confidence)) -# [END def_classify_file] - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - subparsers = parser.add_subparsers(dest='command') - - classify_text_parser = subparsers.add_parser( - 'classify-text', help=classify_text.__doc__) - classify_text_parser.add_argument('text') - - classify_text_parser = subparsers.add_parser( - 'classify-file', help=classify_file.__doc__) - classify_text_parser.add_argument('gcs_uri') - - sentiment_entities_text_parser = subparsers.add_parser( - 'sentiment-entities-text', help=entity_sentiment_text.__doc__) - sentiment_entities_text_parser.add_argument('text') - - sentiment_entities_file_parser = subparsers.add_parser( - 'sentiment-entities-file', help=entity_sentiment_file.__doc__) - sentiment_entities_file_parser.add_argument('gcs_uri') - - sentiment_text_parser = subparsers.add_parser( - 'sentiment-text', help=sentiment_text.__doc__) - sentiment_text_parser.add_argument('text') - - sentiment_file_parser = subparsers.add_parser( - 'sentiment-file', help=sentiment_file.__doc__) - sentiment_file_parser.add_argument('gcs_uri') - - entities_text_parser = subparsers.add_parser( - 'entities-text', help=entities_text.__doc__) - entities_text_parser.add_argument('text') - - entities_file_parser = subparsers.add_parser( - 'entities-file', help=entities_file.__doc__) - entities_file_parser.add_argument('gcs_uri') - - syntax_text_parser = subparsers.add_parser( - 'syntax-text', help=syntax_text.__doc__) - syntax_text_parser.add_argument('text') - - syntax_file_parser = subparsers.add_parser( - 'syntax-file', help=syntax_file.__doc__) - syntax_file_parser.add_argument('gcs_uri') - - args = parser.parse_args() - - if args.command == 'sentiment-text': - sentiment_text(args.text) - elif args.command == 'sentiment-file': - sentiment_file(args.gcs_uri) - elif args.command == 'entities-text': - entities_text(args.text) - elif 
args.command == 'entities-file': - entities_file(args.gcs_uri) - elif args.command == 'syntax-text': - syntax_text(args.text) - elif args.command == 'syntax-file': - syntax_file(args.gcs_uri) - elif args.command == 'sentiment-entities-text': - entity_sentiment_text(args.text) - elif args.command == 'sentiment-entities-file': - entity_sentiment_file(args.gcs_uri) - elif args.command == 'classify-text': - classify_text(args.text) - elif args.command == 'classify-file': - classify_file(args.gcs_uri) diff --git a/language/snippets/cloud-client/v1beta2/snippets_test.py b/language/snippets/cloud-client/v1beta2/snippets_test.py deleted file mode 100644 index 5924ffb49176..000000000000 --- a/language/snippets/cloud-client/v1beta2/snippets_test.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2017 Google, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -import snippets - -BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] -TEST_FILE_URL = 'gs://{}/text.txt'.format(BUCKET) -LONG_TEST_FILE_URL = 'gs://{}/android_text.txt'.format(BUCKET) - - -def test_sentiment_text(capsys): - snippets.sentiment_text('President Obama is speaking at the White House.') - out, _ = capsys.readouterr() - assert 'Score: 0' in out - - -def test_sentiment_utf(capsys): - snippets.sentiment_text( - u'1er site d\'information. 
Les articles du journal et toute l\'' + - u'actualité en continu : International, France, Société, Economie, ' + - u'Culture, Environnement') - out, _ = capsys.readouterr() - assert 'Score: 0' in out - - -def test_sentiment_file(capsys): - snippets.sentiment_file(TEST_FILE_URL) - out, _ = capsys.readouterr() - assert 'Score: 0' in out - - -def test_entities_text(capsys): - snippets.entities_text('President Obama is speaking at the White House.') - out, _ = capsys.readouterr() - assert 'name' in out - assert ': Obama' in out - - -def test_entities_file(capsys): - snippets.entities_file(TEST_FILE_URL) - out, _ = capsys.readouterr() - assert 'name' in out - assert ': Obama' in out - - -def test_syntax_text(capsys): - snippets.syntax_text('President Obama is speaking at the White House.') - out, _ = capsys.readouterr() - assert 'NOUN: President' in out - - -def test_syntax_file(capsys): - snippets.syntax_file(TEST_FILE_URL) - out, _ = capsys.readouterr() - assert 'NOUN: President' in out - - -def test_sentiment_entities_text(capsys): - snippets.entity_sentiment_text( - 'President Obama is speaking at the White House.') - out, _ = capsys.readouterr() - assert 'Content : White House' in out - - -def test_sentiment_entities_file(capsys): - snippets.entity_sentiment_file(TEST_FILE_URL) - out, _ = capsys.readouterr() - assert 'Content : White House' in out - - -def test_sentiment_entities_utf(capsys): - snippets.entity_sentiment_text( - 'foo→bar') - out, _ = capsys.readouterr() - assert 'Begin Offset : 4' in out - - -def test_classify_text(capsys): - snippets.classify_text( - 'Android is a mobile operating system developed by Google, ' - 'based on the Linux kernel and designed primarily for touchscreen ' - 'mobile devices such as smartphones and tablets.') - out, _ = capsys.readouterr() - assert 'name' in out - assert '/Computers & Electronics' in out - - -def test_classify_file(capsys): - snippets.classify_file(LONG_TEST_FILE_URL) - out, _ = capsys.readouterr() - assert 'name' in out - assert '/Computers & Electronics' in out diff --git a/language/snippets/movie_nl/README.md b/language/snippets/movie_nl/README.md deleted file mode 100644 index 95c05dbbce12..000000000000 --- a/language/snippets/movie_nl/README.md +++ /dev/null @@ -1,157 +0,0 @@ -# Introduction - -[![Open in Cloud Shell][shell_img]][shell_link] - -[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png -[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/movie_nl/README.md -This sample is an application of the Google Cloud Platform Natural Language API. -It uses the [imdb movie reviews data set](https://www.cs.cornell.edu/people/pabo/movie-review-data/) -from [Cornell University](http://www.cs.cornell.edu/) and performs sentiment & entity -analysis on it. It combines the capabilities of sentiment analysis and entity recognition -to come up with actors/directors who are the most and least popular. - -### Set Up to Authenticate With Your Project's Credentials - -Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) -steps in the Quickstart doc to create a project and enable the -Cloud Natural Language API. 
Following those steps, make sure that you -[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), -and export the following environment variable: - -``` -export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json -``` - -**Note:** If you get an error saying your API hasn't been enabled, make sure -that you have correctly set this environment variable, and that the project that -you got the service account from has the Natural Language API enabled. - -## How it works -This sample uses the Natural Language API to annotate the input text. The -movie review document is broken into sentences using the `extract_syntax` feature. -Each sentence is sent to the API for sentiment analysis. The positive and negative -sentiment values are combined to come up with a single overall sentiment of the -movie document. - -In addition to the sentiment, the program also extracts the entities of type -`PERSON`, who are the actors in the movie (including the director and anyone -important). These entities are assigned the sentiment value of the document to -come up with the most and least popular actors/directors. - -### Movie document -We define a movie document as a set of reviews. These reviews are individual -sentences and we use the NL API to extract the sentences from the document. See -an example movie document below. - -``` - Sample review sentence 1. Sample review sentence 2. Sample review sentence 3. -``` - -### Sentences and Sentiment -Each sentence from the above document is assigned a sentiment as below. - -``` - Sample review sentence 1 => Sentiment 1 - Sample review sentence 2 => Sentiment 2 - Sample review sentence 3 => Sentiment 3 -``` - -### Sentiment computation -The final sentiment is computed by simply adding the sentence sentiments. - -``` - Total Sentiment = Sentiment 1 + Sentiment 2 + Sentiment 3 -``` - - -### Entity extraction and Sentiment assignment -Entities with type `PERSON` are extracted from the movie document using the NL -API. Since these entities are mentioned in their respective movie document, -they are associated with the document sentiment. - -``` - Document 1 => Sentiment 1 - - Person 1 - Person 2 - Person 3 - - Document 2 => Sentiment 2 - - Person 2 - Person 4 - Person 5 -``` - -Based on the above data we can calculate the sentiment associated with Person 2: - -``` - Person 2 => (Sentiment 1 + Sentiment 2) -``` - -## Movie Data Set -We have used the Cornell Movie Review data as our input. Please follow the instructions below to download and extract the data. - -### Download Instructions - -``` - $ curl -O http://www.cs.cornell.edu/people/pabo/movie-review-data/mix20_rand700_tokens.zip - $ unzip mix20_rand700_tokens.zip -``` - -## Command Line Usage -In order to use the movie analyzer, follow the instructions below. (Note that the `--sample` parameter below runs the script on -fewer documents, and can be omitted to run it on the entire corpus) - -### Install Dependencies - -Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. - -Then, install dependencies by running the following pip command: - -``` -$ pip install -r requirements.txt -``` -### How to Run - -``` -$ python main.py analyze --inp "tokens/*/*" \ - --sout sentiment.json \ - --eout entity.json \ - --sample 5 -``` - -You should see the log file `movie.log` created. - -## Output Data -The program produces sentiment and entity output in json format. 
For example: - -### Sentiment Output -``` - { - "doc_id": "cv310_tok-16557.txt", - "sentiment": 3.099, - "label": -1 - } -``` - -### Entity Output - -``` - { - "name": "Sean Patrick Flanery", - "wiki_url": "http://en.wikipedia.org/wiki/Sean_Patrick_Flanery", - "sentiment": 3.099 - } -``` - -### Entity Output Sorting -In order to sort and rank the entities generated, use the same `main.py` script. For example, -this will print the top 5 actors with negative sentiment: - -``` -$ python main.py rank --entity_input entity.json \ - --sentiment neg \ - --reverse True \ - --sample 5 -``` diff --git a/language/snippets/movie_nl/main.py b/language/snippets/movie_nl/main.py deleted file mode 100644 index 06be1c9c6f32..000000000000 --- a/language/snippets/movie_nl/main.py +++ /dev/null @@ -1,334 +0,0 @@ -# Copyright 2016 Google, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import codecs -import glob -import json -import logging -import os - -import googleapiclient.discovery -from googleapiclient.errors import HttpError -import requests - - -def analyze_document(service, document): - """Analyze the document and get the distribution of sentiments and - the movie name.""" - logging.info('Analyzing {}'.format(document.doc_id)) - - sentiments, entities = document.extract_sentiment_entities(service) - return sentiments, entities - - -def get_request_body(text, syntax=True, entities=True, sentiment=True): - """Creates the body of the request to the language api in - order to get an appropriate api response.""" - body = { - 'document': { - 'type': 'PLAIN_TEXT', - 'content': text, - }, - 'features': { - 'extract_syntax': syntax, - 'extract_entities': entities, - 'extract_document_sentiment': sentiment, - }, - 'encoding_type': 'UTF32' - } - - return body - - -class Document(object): - """Document class captures a single document of movie reviews.""" - - def __init__(self, text, doc_id, doc_path): - self.text = text - self.doc_id = doc_id - self.doc_path = doc_path - self.sentiment_entity_pair = None - self.label = None - - def extract_sentiment_entities(self, service): - """Extract the sentences in a document.""" - - if self.sentiment_entity_pair is not None: - return self.sentence_entity_pair - - docs = service.documents() - request_body = get_request_body( - self.text, - syntax=False, - entities=True, - sentiment=True) - request = docs.annotateText(body=request_body) - - ent_list = [] - - response = request.execute() - entities = response.get('entities', []) - documentSentiment = response.get('documentSentiment', {}) - - for entity in entities: - ent_type = entity.get('type') - wiki_url = entity.get('metadata', {}).get('wikipedia_url') - - if ent_type == 'PERSON' and wiki_url is not None: - ent_list.append(wiki_url) - - self.sentiment_entity_pair = (documentSentiment, ent_list) - - return self.sentiment_entity_pair - - -def to_sentiment_json(doc_id, sent, label): - """Convert the sentiment info to json. 
- - Args: - doc_id: Document id - sent: Overall Sentiment for the document - label: Actual label +1, 0, -1 for the document - - Returns: - String json representation of the input - - """ - json_doc = {} - - json_doc['doc_id'] = doc_id - json_doc['sentiment'] = float('%.3f' % sent) - json_doc['label'] = label - - return json.dumps(json_doc) - - -def get_wiki_title(wiki_url): - """Get the wikipedia page title for a given wikipedia URL. - - Args: - wiki_url: Wikipedia URL e.g., http://en.wikipedia.org/wiki/Sean_Connery - - Returns: - Wikipedia canonical name e.g., Sean Connery - - """ - try: - content = requests.get(wiki_url).text - return content.split('title')[1].split('-')[0].split('>')[1].strip() - except KeyError: - return os.path.basename(wiki_url).replace('_', ' ') - - -def to_entity_json(entity, entity_sentiment, entity_frequency): - """Convert entities and their associated sentiment to json. - - Args: - entity: Wikipedia entity name - entity_sentiment: Sentiment associated with the entity - entity_frequency: Frequency of the entity in the corpus - - Returns: - Json string representation of input - - """ - json_doc = {} - - avg_sentiment = float(entity_sentiment) / float(entity_frequency) - - json_doc['wiki_url'] = entity - json_doc['name'] = get_wiki_title(entity) - json_doc['sentiment'] = float('%.3f' % entity_sentiment) - json_doc['avg_sentiment'] = float('%.3f' % avg_sentiment) - - return json.dumps(json_doc) - - -def get_sentiment_entities(service, document): - """Compute the overall sentiment volume in the document. - - Args: - service: Client to Google Natural Language API - document: Movie review document (See Document object) - - Returns: - Tuple of total sentiment and entities found in the document - - """ - - sentiments, entities = analyze_document(service, document) - score = sentiments.get('score') - - return (score, entities) - - -def get_sentiment_label(sentiment): - """Return the sentiment label based on the sentiment quantity.""" - if sentiment < 0: - return -1 - elif sentiment > 0: - return 1 - else: - return 0 - - -def process_movie_reviews(service, reader, sentiment_writer, entity_writer): - """Perform some sentiment math and come up with movie review.""" - collected_entities = {} - - for document in reader: - try: - sentiment_total, entities = get_sentiment_entities( - service, document) - except HttpError as e: - logging.error('Error process_movie_reviews {}'.format(e.content)) - continue - - document.label = get_sentiment_label(sentiment_total) - - sentiment_writer.write( - to_sentiment_json( - document.doc_id, - sentiment_total, - document.label - ) - ) - - sentiment_writer.write('\n') - - for ent in entities: - ent_sent, frequency = collected_entities.get(ent, (0, 0)) - ent_sent += sentiment_total - frequency += 1 - - collected_entities[ent] = (ent_sent, frequency) - - for entity, sentiment_frequency in collected_entities.items(): - entity_writer.write(to_entity_json(entity, sentiment_frequency[0], - sentiment_frequency[1])) - entity_writer.write('\n') - - sentiment_writer.flush() - entity_writer.flush() - - -def document_generator(dir_path_pattern, count=None): - """Generator for the input movie documents. 
- - Args: - dir_path_pattern: Input dir pattern e.g., "foo/bar/*/*" - count: Number of documents to read else everything if None - - Returns: - Generator which contains Document (See above) - - """ - for running_count, item in enumerate(glob.iglob(dir_path_pattern)): - if count and running_count >= count: - raise StopIteration() - - doc_id = os.path.basename(item) - - with codecs.open(item, encoding='utf-8') as f: - try: - text = f.read() - except UnicodeDecodeError: - continue - - yield Document(text, doc_id, item) - - -def rank_entities(reader, sentiment=None, topn=None, reverse_bool=False): - """Rank the entities (actors) based on their sentiment - assigned from the movie.""" - - items = [] - for item in reader: - json_item = json.loads(item) - sent = json_item.get('sentiment') - entity_item = (sent, json_item) - - if sentiment: - if sentiment == 'pos' and sent > 0: - items.append(entity_item) - elif sentiment == 'neg' and sent < 0: - items.append(entity_item) - else: - items.append(entity_item) - - items.sort(reverse=reverse_bool) - items = [json.dumps(item[1]) for item in items] - - print('\n'.join(items[:topn])) - - -def analyze(input_dir, sentiment_writer, entity_writer, sample, log_file): - """Analyze the document for sentiment and entities""" - - # Create logger settings - logging.basicConfig(filename=log_file, level=logging.DEBUG) - - # Create a Google Service object - service = googleapiclient.discovery.build('language', 'v1') - - reader = document_generator(input_dir, sample) - - # Process the movie documents - process_movie_reviews(service, reader, sentiment_writer, entity_writer) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - - subparsers = parser.add_subparsers(dest='command') - - rank_parser = subparsers.add_parser('rank') - - rank_parser.add_argument( - '--entity_input', help='location of entity input') - rank_parser.add_argument( - '--sentiment', help='filter sentiment as "neg" or "pos"') - rank_parser.add_argument( - '--reverse', help='reverse the order of the items', type=bool, - default=False - ) - rank_parser.add_argument( - '--sample', help='number of top items to process', type=int, - default=None - ) - - analyze_parser = subparsers.add_parser('analyze') - - analyze_parser.add_argument( - '--inp', help='location of the input', required=True) - analyze_parser.add_argument( - '--sout', help='location of the sentiment output', required=True) - analyze_parser.add_argument( - '--eout', help='location of the entity output', required=True) - analyze_parser.add_argument( - '--sample', help='number of top items to process', type=int) - analyze_parser.add_argument('--log_file', default='movie.log') - - args = parser.parse_args() - - if args.command == 'analyze': - with open(args.sout, 'w') as sout, open(args.eout, 'w') as eout: - analyze(args.inp, sout, eout, args.sample, args.log_file) - elif args.command == 'rank': - with open(args.entity_input, 'r') as entity_input: - rank_entities( - entity_input, args.sentiment, args.sample, args.reverse) diff --git a/language/snippets/movie_nl/main_test.py b/language/snippets/movie_nl/main_test.py deleted file mode 100644 index 7e33cefd3e30..000000000000 --- a/language/snippets/movie_nl/main_test.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2016 Google, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json - -import googleapiclient.discovery -import six - -import main - - -def test_get_request_body(): - text = 'hello world' - body = main.get_request_body(text, syntax=True, entities=True, - sentiment=False) - assert body.get('document').get('content') == text - - assert body.get('features').get('extract_syntax') is True - assert body.get('features').get('extract_entities') is True - assert body.get('features').get('extract_document_sentiment') is False - - -def test_get_sentiment_label(): - assert main.get_sentiment_label(20.50) == 1 - assert main.get_sentiment_label(-42.34) == -1 - - -def test_to_sentiment_json(): - doc_id = '12345' - sentiment = 23.344564 - label = 1 - - sentiment_json = json.loads( - main.to_sentiment_json(doc_id, sentiment, label) - ) - - assert sentiment_json.get('doc_id') == doc_id - assert sentiment_json.get('sentiment') == 23.345 - assert sentiment_json.get('label') == label - - -def test_process_movie_reviews(): - service = googleapiclient.discovery.build('language', 'v1') - - doc1 = main.Document('Top Gun was awesome and Tom Cruise rocked!', 'doc1', - 'doc1') - doc2 = main.Document('Tom Cruise is a great actor.', 'doc2', 'doc2') - - reader = [doc1, doc2] - swriter = six.StringIO() - ewriter = six.StringIO() - - main.process_movie_reviews(service, reader, swriter, ewriter) - - sentiments = swriter.getvalue().strip().split('\n') - entities = ewriter.getvalue().strip().split('\n') - - sentiments = [json.loads(sentiment) for sentiment in sentiments] - entities = [json.loads(entity) for entity in entities] - - # assert sentiments - assert sentiments[0].get('sentiment') > 0 - assert sentiments[0].get('label') == 1 - - assert sentiments[1].get('sentiment') > 0 - assert sentiments[1].get('label') == 1 - - # assert entities - assert len(entities) == 1 - assert entities[0].get('name') == 'Tom Cruise' - assert (entities[0].get('wiki_url') == - 'https://en.wikipedia.org/wiki/Tom_Cruise') - assert entities[0].get('sentiment') > 0 - - -def test_rank_positive_entities(capsys): - reader = [ - ('{"avg_sentiment": -12.0, ' - '"name": "Patrick Macnee", "sentiment": -12.0}'), - ('{"avg_sentiment": 5.0, ' - '"name": "Paul Rudd", "sentiment": 5.0}'), - ('{"avg_sentiment": -5.0, ' - '"name": "Martha Plimpton", "sentiment": -5.0}'), - ('{"avg_sentiment": 7.0, ' - '"name": "Lucy (2014 film)", "sentiment": 7.0}') - ] - - main.rank_entities(reader, 'pos', topn=1, reverse_bool=False) - out, err = capsys.readouterr() - - expected = ('{"avg_sentiment": 5.0, ' - '"name": "Paul Rudd", "sentiment": 5.0}') - - expected = ''.join(sorted(expected)) - out = ''.join(sorted(out.strip())) - assert out == expected - - -def test_rank_negative_entities(capsys): - reader = [ - ('{"avg_sentiment": -12.0, ' - '"name": "Patrick Macnee", "sentiment": -12.0}'), - ('{"avg_sentiment": 5.0, ' - '"name": "Paul Rudd", "sentiment": 5.0}'), - ('{"avg_sentiment": -5.0, ' - '"name": "Martha Plimpton", "sentiment": -5.0}'), - ('{"avg_sentiment": 7.0, ' - '"name": "Lucy (2014 film)", "sentiment": 7.0}') - ] - - main.rank_entities(reader, 'neg', topn=1, reverse_bool=True) - out, err = 
capsys.readouterr() - - expected = ('{"avg_sentiment": -5.0, ' - '"name": "Martha Plimpton", "sentiment": -5.0}') - - expected = ''.join(sorted(expected)) - out = ''.join(sorted(out.strip())) - assert out == expected diff --git a/language/snippets/movie_nl/requirements.txt b/language/snippets/movie_nl/requirements.txt deleted file mode 100644 index 9718b185a6de..000000000000 --- a/language/snippets/movie_nl/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -google-api-python-client==1.7.4 -google-auth==1.5.1 -google-auth-httplib2==0.0.3 -requests==2.19.1 diff --git a/language/snippets/ocr_nl/README.md b/language/snippets/ocr_nl/README.md deleted file mode 100644 index a34ff3179c4c..000000000000 --- a/language/snippets/ocr_nl/README.md +++ /dev/null @@ -1,232 +0,0 @@ - - -[![Open in Cloud Shell][shell_img]][shell_link] - -[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png -[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/ocr_nl/README.md -# Using the Cloud Natural Language API to analyze image text found with Cloud Vision - -This example uses the [Cloud Vision API](https://cloud.google.com/vision/) to -detect text in images, then analyzes that text using the [Cloud NL (Natural -Language) API](https://cloud.google.com/natural-language/) to detect -[entities](https://cloud.google.com/natural-language/docs/basics#entity_analysis) -in the text. It stores the detected entity -information in an [sqlite3](https://www.sqlite.org) database, which may then be -queried. - -(This kind of analysis can be useful with scans of brochures and fliers, -invoices, and other types of company documents... or maybe just organizing your -memes). - -After the example script has analyzed a directory of images, it outputs some -information on the images' entities to STDOUT. You can also further query -the generated sqlite3 database. - -## Setup - -### Install sqlite3 as necessary - -The example requires that sqlite3 be installed. Most likely, sqlite3 is already -installed for you on your machine, but if not, you can find it -[here](https://www.sqlite.org/download.html). - -### Set Up to Authenticate With Your Project's Credentials - -* Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) -steps in the Quickstart doc to create a project and enable the -Cloud Natural Language API. -* Following those steps, make sure that you [Set Up a Service - Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), - and export the following environment variable: - - ``` - export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json - ``` -* This sample also requires that you [enable the Cloud Vision - API](https://console.cloud.google.com/apis/api/vision.googleapis.com/overview?project=_) - -## Running the example - -Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. - -To run the example, install the necessary libraries using pip: - -```sh -$ pip install -r requirements.txt -``` - -You must also be set up to authenticate with the Cloud APIs using your -project's service account credentials, as described above. 
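As a quick sanity check before running the full pipeline, you can confirm that the client library and credentials are wired up correctly. This short snippet (not part of the sample) builds the same two service objects that `main.py` uses; an authentication problem will surface here or on the first request:

```
import googleapiclient.discovery

# Build clients for the two APIs this example calls, using the default
# credentials pointed to by GOOGLE_APPLICATION_CREDENTIALS.
vision_service = googleapiclient.discovery.build('vision', 'v1')
language_service = googleapiclient.discovery.build('language', 'v1')
print('Vision and Natural Language service objects created.')
```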
-
-Then, run the script on a directory of images to do the analysis, e.g.:
-
-```sh
-$ python main.py --input_directory=
-```
-
-You can try this on a sample directory of images:
-
-```sh
-$ curl -O http://storage.googleapis.com/python-docs-samples-tests/language/ocr_nl-images.zip
-$ unzip ocr_nl-images.zip
-$ python main.py --input_directory=images/
-```
-
-## A walkthrough of the example and its results
-
-Let's take a look at what the example generates when run on the `images/`
-sample directory, and how it does it.
-
-The script looks at each image file in the given directory, and uses the Vision
-API's text detection capabilities (OCR) to find any text in each image. It
-passes that info to the NL API, and asks it to detect
-[entities](https://cloud.google.com/natural-language/docs/basics#entity_analysis)
-in the discovered text, then stores this information in a queryable database.
-
-To keep things simple, we're just passing to the NL API all the text found in a
-given image, in one string. Note that sometimes this string can include
-misinterpreted characters (if the image text was not very clear), or list words
-"out of order" from how a human would interpret them. So, the text that is
-actually passed to the NL API might not be quite what you would have predicted
-with your human eyeballs.
-
-The Entity information returned by the NL API includes *type*, *name*, *salience*,
-information about where in the text the given entity was found, and detected
-language. It may also include *metadata*, including a link to a Wikipedia URL
-that the NL API believes this entity maps to. See the
-[documentation](https://cloud.google.com/natural-language/docs/) and the [API
-reference pages](https://cloud.google.com/natural-language/reference/rest/v1beta1/Entity)
-for more information about `Entity` fields.
-
-For example, if the NL API was given the sentence:
-
-```
-"Holmes and Watson walked over to the cafe."
-```
-
-it would return a response something like the following:
-
-```
-{
-  "entities": [{
-    "salience": 0.51629782,
-    "mentions": [{
-      "text": {
-        "content": "Holmes",
-        "beginOffset": 0
-      }}],
-    "type": "PERSON",
-    "name": "Holmes",
-    "metadata": {
-      "wikipedia_url": "http://en.wikipedia.org/wiki/Sherlock_Holmes"
-    }},
-    {
-    "salience": 0.22334209,
-    "mentions": [{
-      "text": {
-        "content": "Watson",
-        "beginOffset": 11
-      }}],
-    "type": "PERSON",
-    "name": "Watson",
-    "metadata": {
-      "wikipedia_url": "http://en.wikipedia.org/wiki/Dr._Watson"
-    }}],
-  "language": "en"
-}
-```
-
-Note that the NL API determined from context that "Holmes" was referring to
-'Sherlock Holmes', even though the name "Sherlock" was not included.
-
-Note also that not all nouns in a given sentence are detected as Entities. An
-Entity represents a phrase in the text that is a known entity, such as a person,
-an organization, or location. The generic mention of a 'cafe' is not treated as
-an entity in this sense.
-
-For each image file, we store its detected entity information (if any) in an
-sqlite3 database.
-
-### Querying for information about the detected entities
-
-Once the detected entity information from all the images is stored in the
-sqlite3 database, we can run some queries to do some interesting analysis. The
-script runs a couple of such example query sets and outputs the result to STDOUT.
-
-The first set of queries outputs information about the top 15 most frequent
-entity names found in the images, and the second outputs information about the
-top 15 most frequent Wikipedia URLs found.
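Both reports are plain SQL aggregations over the `entities` table. A sketch of the first one, runnable on its own against any database the script has already generated (the filename here is an example; substitute your own `entities<timestamp>.db`):

```
import sqlite3

# Connect to a database previously generated by main.py.
conn = sqlite3.connect('entities1466518508.db')

# The first report: how often each entity name was detected.
query = ('select name, count(name) as wc from entities '
         'group by name order by wc desc limit 15;')
for name, count in conn.execute(query):
    print('----Name: {} was found with count {}'.format(name, count))
conn.close()
```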
- -For example, with the sample image set, note that the name 'Sherlock Holmes' is -found three times, but entities associated with the URL -http://en.wikipedia.org/wiki/Sherlock_Holmes are found four times; one of the -entity names was only "Holmes", but the NL API detected from context that it -referred to Sherlock Holmes. Similarly, you can see that mentions of 'Hive' and -'Spark' mapped correctly – given their context – to the URLs of those Apache -products. - -``` -----entity: http://en.wikipedia.org/wiki/Apache_Hive was found with count 1 -Found in file images/IMG_20160621_133020.jpg, detected as type OTHER, with - locale en. -names(s): set([u'hive']) -salience measure(s): set([0.0023808887]) -``` - -Similarly, 'Elizabeth' (in screencaps of text from "Pride and Prejudice") is -correctly mapped to http://en.wikipedia.org/wiki/Elizabeth_Bennet because of the -context of the surrounding text. - -``` -----entity: http://en.wikipedia.org/wiki/Elizabeth_Bennet was found with count 2 -Found in file images/Screenshot 2016-06-19 11.51.50.png, detected as type PERSON, with - locale en. -Found in file images/Screenshot 2016-06-19 12.08.30.png, detected as type PERSON, with - locale en. -names(s): set([u'elizabeth']) -salience measure(s): set([0.34601286, 0.0016268975]) -``` - -## Further queries to the sqlite3 database - -When the script runs, it makes a couple of example queries to the database -containing the entity information returned from the NL API. You can make further -queries on that database by starting up sqlite3 from the command line, and -passing it the name of the database file generated by running the example. This -file will be in the same directory, and have `entities` as a prefix, with the -timestamp appended. (If you have run the example more than once, a new database -file will be created each time). - -Run sqlite3 as follows (using the name of your own database file): - -```sh -$ sqlite3 entities1466518508.db -``` - -You'll see something like this: - -``` -SQLite version 3.8.10.2 2015-05-20 18:17:19 -Enter ".help" for usage hints. -sqlite> -``` - -From this prompt, you can make any queries on the data that you want. E.g., -start with something like: - -``` -sqlite> select * from entities limit 20; -``` - -Or, try this to see in which images the most entities were detected: - -``` -sqlite> select filename, count(filename) from entities group by filename; -``` - -You can do more complex queries to get further information about the entities -that have been discovered in your images. E.g., you might want to investigate -which of the entities are most commonly found together in the same image. See -the [SQLite documentation](https://www.sqlite.org/docs.html) for more -information. - - diff --git a/language/snippets/ocr_nl/main.py b/language/snippets/ocr_nl/main.py deleted file mode 100755 index db156054450b..000000000000 --- a/language/snippets/ocr_nl/main.py +++ /dev/null @@ -1,354 +0,0 @@ -#!/usr/bin/env python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This example uses the Google Cloud Vision API to detect text in images, then -analyzes that text using the Google Cloud Natural Language API to detect -entities in the text. It stores the detected entity information in an sqlite3 -database, which may then be queried. - -After this script has analyzed a directory of images, it outputs some -information on the images' entities to STDOUT. You can also further query -the generated sqlite3 database; see the README for more information. - -Run the script on a directory of images to do the analysis, E.g.: - $ python main.py --input_directory= - -You can try this on a sample directory of images: - $ curl -O http://storage.googleapis.com/python-docs-samples-tests/language/ocr_nl-images.zip - $ unzip ocr_nl-images.zip - $ python main.py --input_directory=images/ - -""" # noqa - -import argparse -import base64 -import contextlib -import logging -import os -import sqlite3 -import sys -import time - -import googleapiclient.discovery -import googleapiclient.errors - -BATCH_SIZE = 10 - - -class VisionApi(object): - """Construct and use the Cloud Vision API service.""" - - def __init__(self): - self.service = googleapiclient.discovery.build('vision', 'v1') - - def detect_text(self, input_filenames, num_retries=3, max_results=6): - """Uses the Vision API to detect text in the given file.""" - batch_request = [] - for filename in input_filenames: - request = { - 'image': {}, - 'features': [{ - 'type': 'TEXT_DETECTION', - 'maxResults': max_results, - }] - } - - # Accept both files in cloud storage, as well as local files. - if filename.startswith('gs://'): - request['image']['source'] = { - 'gcsImageUri': filename - } - else: - with open(filename, 'rb') as image_file: - request['image']['content'] = base64.b64encode( - image_file.read()).decode('UTF-8') - - batch_request.append(request) - - request = self.service.images().annotate( - body={'requests': batch_request}) - - try: - responses = request.execute(num_retries=num_retries) - if 'responses' not in responses: - return {} - - text_response = {} - for filename, response in zip( - input_filenames, responses['responses']): - - if 'error' in response: - logging.error('API Error for {}: {}'.format( - filename, - response['error'].get('message', ''))) - continue - - text_response[filename] = response.get('textAnnotations', []) - - return text_response - - except googleapiclient.errors.HttpError as e: - logging.error('Http Error for {}: {}'.format(filename, e)) - except KeyError as e2: - logging.error('Key error: {}'.format(e2)) - - -class TextAnalyzer(object): - """Construct and use the Google Natural Language API service.""" - - def __init__(self, db_filename=None): - self.service = googleapiclient.discovery.build('language', 'v1') - - # This list will store the entity information gleaned from the - # image files. - self.entity_info = [] - - # This is the filename of the sqlite3 database to save to - self.db_filename = db_filename or 'entities{}.db'.format( - int(time.time())) - - def _get_native_encoding_type(self): - """Returns the encoding type that matches Python's native strings.""" - if sys.maxunicode == 65535: - return 'UTF16' - else: - return 'UTF32' - - def nl_detect(self, text): - """Use the Natural Language API to analyze the given text string.""" - # We're only requesting 'entity' information from the Natural Language - # API at this time. 
- body = { - 'document': { - 'type': 'PLAIN_TEXT', - 'content': text, - }, - 'encodingType': self._get_native_encoding_type(), - } - entities = [] - try: - request = self.service.documents().analyzeEntities(body=body) - response = request.execute() - entities = response['entities'] - except googleapiclient.errors.HttpError as e: - logging.error('Http Error: %s' % e) - except KeyError as e2: - logging.error('Key error: %s' % e2) - return entities - - def add_entities(self, filename, locale, document): - """Apply the Natural Language API to the document, and collect the - detected entities.""" - - # Apply the Natural Language API to the document. - entities = self.nl_detect(document) - self.extract_and_save_entity_info(entities, locale, filename) - - def extract_entity_info(self, entity): - """Extract information about an entity.""" - type = entity['type'] - name = entity['name'].lower() - metadata = entity['metadata'] - salience = entity['salience'] - wiki_url = metadata.get('wikipedia_url', None) - return (type, name, salience, wiki_url) - - def extract_and_save_entity_info(self, entities, locale, filename): - for entity in entities: - type, name, salience, wiki_url = self.extract_entity_info(entity) - # Because this is a small example, we're using a list to hold - # all the entity information, then we'll insert it into the - # database all at once when we've processed all the files. - # For a larger data set, you would want to write to the database - # in batches. - self.entity_info.append( - (locale, type, name, salience, wiki_url, filename)) - - def write_entity_info_to_db(self): - """Store the info gleaned about the entities in the text, via the - Natural Language API, in an sqlite3 database table, and then print out - some simple analytics. - """ - logging.info('Saving entity info to the sqlite3 database.') - # Create the db. - with contextlib.closing(sqlite3.connect(self.db_filename)) as conn: - with conn as cursor: - # Create table - cursor.execute( - 'CREATE TABLE if not exists entities (locale text, ' - 'type text, name text, salience real, wiki_url text, ' - 'filename text)') - with conn as cursor: - # Load all the data - cursor.executemany( - 'INSERT INTO entities VALUES (?,?,?,?,?,?)', - self.entity_info) - - def output_entity_data(self): - """Output some info about the entities by querying the generated - sqlite3 database. - """ - - with contextlib.closing(sqlite3.connect(self.db_filename)) as conn: - - # This query finds the number of times each entity name was - # detected, in descending order by count, and returns information - # about the first 15 names, including the files in which they were - # found, their detected 'salience' and language (locale), and the - # wikipedia urls (if any) associated with them. 
- print('\n==============\nTop 15 most frequent entity names:') - - cursor = conn.cursor() - results = cursor.execute( - 'select name, count(name) as wc from entities ' - 'group by name order by wc desc limit 15;') - - for item in results: - cursor2 = conn.cursor() - print(u'\n----Name: {} was found with count {}'.format(*item)) - results2 = cursor2.execute( - 'SELECT name, type, filename, locale, wiki_url, salience ' - 'FROM entities WHERE name=?', (item[0],)) - urls = set() - for elt in results2: - print(('Found in file {}, detected as type {}, with\n' - ' locale {} and salience {}.').format( - elt[2], elt[1], elt[3], elt[5])) - if elt[4]: - urls.add(elt[4]) - if urls: - print('url(s): {}'.format(urls)) - - # This query finds the number of times each wikipedia url was - # detected, in descending order by count, and returns information - # about the first 15 urls, including the files in which they were - # found and the names and 'salience' with which they were - # associated. - print('\n==============\nTop 15 most frequent Wikipedia URLs:') - c = conn.cursor() - results = c.execute( - 'select wiki_url, count(wiki_url) as wc from entities ' - 'group by wiki_url order by wc desc limit 15;') - - for item in results: - cursor2 = conn.cursor() - print('\n----entity: {} was found with count {}'.format(*item)) - results2 = cursor2.execute( - 'SELECT name, type, filename, locale, salience ' - 'FROM entities WHERE wiki_url=?', (item[0],)) - names = set() - salience = set() - for elt in results2: - print(('Found in file {}, detected as type {}, with\n' - ' locale {}.').format(elt[2], elt[1], elt[3])) - names.add(elt[0]) - salience.add(elt[4]) - print('names(s): {}'.format(names)) - print('salience measure(s): {}'.format(salience)) - - -def extract_description(texts): - """Returns text annotations as a single string""" - document = [] - - for text in texts: - try: - document.append(text['description']) - locale = text['locale'] - # Process only the first entry, which contains all - # text detected. - break - except KeyError as e: - logging.error('KeyError: %s\n%s' % (e, text)) - return (locale, ' '.join(document)) - - -def extract_descriptions(input_filename, texts, text_analyzer): - """Gets the text that was detected in the image.""" - if texts: - locale, document = extract_description(texts) - text_analyzer.add_entities(input_filename, locale, document) - sys.stdout.write('.') # Output a progress indicator. - sys.stdout.flush() - elif texts == []: - print('%s had no discernible text.' % input_filename) - - -def get_text_from_files(vision, input_filenames, text_analyzer): - """Call the Vision API on a file and index the results.""" - texts = vision.detect_text(input_filenames) - if texts: - for filename, text in texts.items(): - extract_descriptions(filename, text, text_analyzer) - - -def batch(list_to_batch, batch_size=BATCH_SIZE): - """Group a list into batches of size batch_size. - - >>> tuple(batch([1, 2, 3, 4, 5], batch_size=2)) - ((1, 2), (3, 4), (5)) - """ - for i in range(0, len(list_to_batch), batch_size): - yield tuple(list_to_batch[i:i + batch_size]) - - -def main(input_dir, db_filename=None): - """Walk through all the image files in the given directory, extracting any - text from them and feeding that text to the Natural Language API for - analysis. 
- """ - # Create a client object for the Vision API - vision_api_client = VisionApi() - # Create an object to analyze our text using the Natural Language API - text_analyzer = TextAnalyzer(db_filename) - - if input_dir: - allfileslist = [] - # Recursively construct a list of all the files in the given input - # directory. - for folder, subs, files in os.walk(input_dir): - for filename in files: - allfileslist.append(os.path.join(folder, filename)) - - # Analyze the text in the files using the Vision and Natural Language - # APIs. - for filenames in batch(allfileslist, batch_size=1): - get_text_from_files(vision_api_client, filenames, text_analyzer) - - # Save the result to a database, then run some queries on the database, - # with output to STDOUT. - text_analyzer.write_entity_info_to_db() - - # now, print some information about the entities detected. - text_analyzer.output_entity_data() - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description='Detects text in the images in the given directory.') - parser.add_argument( - '--input_directory', - help='The image directory you\'d like to detect text in. If left ' - 'unspecified, the --db specified will be queried without being ' - 'updated.') - parser.add_argument( - '--db', help='The filename to use for the sqlite3 database.') - args = parser.parse_args() - - if not (args.input_directory or args.db): - parser.error('Either --input_directory or --db must be specified.') - - main(args.input_directory, args.db) diff --git a/language/snippets/ocr_nl/main_test.py b/language/snippets/ocr_nl/main_test.py deleted file mode 100755 index 5a8f72f233a4..000000000000 --- a/language/snippets/ocr_nl/main_test.py +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import re -import zipfile - -import requests - -import main - -BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] -TEST_IMAGE_URI = 'gs://{}/language/image8.png'.format(BUCKET) -OCR_IMAGES_URI = 'http://storage.googleapis.com/{}/{}'.format( - BUCKET, 'language/ocr_nl-images-small.zip') - - -def test_batch_empty(): - for batch_size in range(1, 10): - assert len( - list(main.batch([], batch_size=batch_size))) == 0 - - -def test_batch_single(): - for batch_size in range(1, 10): - batched = tuple(main.batch([1], batch_size=batch_size)) - assert batched == ((1,),) - - -def test_single_image_returns_text(): - vision_api_client = main.VisionApi() - - image_path = TEST_IMAGE_URI - texts = vision_api_client.detect_text([image_path]) - - assert image_path in texts - _, document = main.extract_description(texts[image_path]) - assert "daughter" in document - assert "Bennet" in document - assert "hat" in document - - -def test_single_nonimage_returns_error(): - vision_api_client = main.VisionApi() - texts = vision_api_client.detect_text(['README.md']) - assert "README.md" not in texts - - -def test_text_returns_entities(): - text = "Holmes and Watson walked to the cafe." 
-    text_analyzer = main.TextAnalyzer()
-    entities = text_analyzer.nl_detect(text)
-    assert entities
-    etype, ename, salience, wurl = text_analyzer.extract_entity_info(
-        entities[0])
-    assert ename == 'holmes'
-
-
-def test_entities_list():
-    vision_api_client = main.VisionApi()
-    image_path = TEST_IMAGE_URI
-    texts = vision_api_client.detect_text([image_path])
-    locale, document = main.extract_description(texts[image_path])
-    text_analyzer = main.TextAnalyzer()
-    entities = text_analyzer.nl_detect(document)
-    assert entities
-    etype, ename, salience, wurl = text_analyzer.extract_entity_info(
-        entities[0])
-    assert ename == 'bennet'
-
-
-def test_main(tmpdir, capsys):
-    images_path = str(tmpdir.mkdir('images'))
-
-    # First, pull down some test data
-    response = requests.get(OCR_IMAGES_URI)
-    images_file = tmpdir.join('images.zip')
-    images_file.write_binary(response.content)
-
-    # Extract it to the image directory
-    with zipfile.ZipFile(str(images_file)) as zfile:
-        zfile.extractall(images_path)
-
-    main.main(images_path, str(tmpdir.join('ocr_nl.db')))
-
-    stdout, _ = capsys.readouterr()
-
-    assert re.search(r'.* found with count', stdout)
diff --git a/language/snippets/ocr_nl/requirements.txt b/language/snippets/ocr_nl/requirements.txt
deleted file mode 100644
index 5e9029185cdc..000000000000
--- a/language/snippets/ocr_nl/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-google-api-python-client==1.7.4
-google-auth==1.5.1
-google-auth-httplib2==0.0.3
diff --git a/language/snippets/syntax_triples/README.md b/language/snippets/syntax_triples/README.md
deleted file mode 100644
index 551057e7217d..000000000000
--- a/language/snippets/syntax_triples/README.md
+++ /dev/null
@@ -1,96 +0,0 @@
-# Using the Cloud Natural Language API to find subject-verb-object triples in text
-
-[![Open in Cloud Shell][shell_img]][shell_link]
-
-[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png
-[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/syntax_triples/README.md
-
-This example finds subject-verb-object triples in a given piece of text using
-the syntax analysis capabilities of
-[Cloud Natural Language API](https://cloud.google.com/natural-language/).
-To do this, it calls the extractSyntax feature of the API
-and uses the dependency parse tree and part-of-speech tags in the response
-to build the subject-verb-object triples. The results are printed to STDOUT.
-This type of analysis can be considered a
-first step towards an information extraction task.
-
-## Set Up to Authenticate With Your Project's Credentials
-
-Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project)
-steps in the Quickstart doc to create a project and enable the
-Cloud Natural Language API. Following those steps, make sure that you
-[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account),
-and export the following environment variable:
-
-```
-export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json
-```
-
-## Running the example
-
-Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed.
-
-To run the example, install the necessary libraries using pip:
-
-```
-$ pip install -r requirements.txt
-```
-You must also be set up to authenticate with the Cloud APIs using your
-project's service account credentials, as described above.
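To see the raw analysis that the script builds on, you can call the same API method it does. A minimal sketch mirroring `analyze_syntax()` in `main.py`, with a sample sentence of our own:

```
import googleapiclient.discovery

# Build the same v1beta1 client that analyze_syntax() in main.py uses.
service = googleapiclient.discovery.build('language', 'v1beta1')
request = service.documents().annotateText(body={
    'document': {'type': 'PLAIN_TEXT', 'content': 'Obama won the election.'},
    'features': {'extract_syntax': True},
    'encodingType': 'UTF32',  # see get_native_encoding_type() in main.py
})
response = request.execute()

# Each token carries the part-of-speech tag and dependency edge used to
# assemble the subject-verb-object triples.
for token in response.get('tokens', []):
    print(token['text']['content'],
          token['partOfSpeech']['tag'],
          token['dependencyEdge']['label'])
```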
- -Then, run the script on a file containing the text that you wish to analyze. -The text must be encoded in UTF8 or ASCII: - -``` -$ python main.py -``` - -Try this on a sample text in the resources directory: - -``` -$ python main.py resources/obama_wikipedia.txt -``` - -## A walkthrough of the example and its results - -Let's take a look at what the example generates when run on the -`obama_wikipedia.txt` sample file, and how it does it. - -The goal is to find all subject-verb-object -triples in the text. The example first sends the text to the Cloud Natural -Language API to perform extractSyntax analysis. Then, using part-of-speech tags, - it finds all the verbs in the text. For each verb, it uses the dependency -parse tree information to find all the dependent tokens. - -For example, given the following sentence in the `obama_wikipedia.txt` file: - -``` -"He began his presidential campaign in 2007" -``` -The example finds the verb `began`, and `He`, `campaign`, and `in` as its -dependencies. Then the script enumerates the dependencies for each verb and -finds all the subjects and objects. For the sentence above, the found subject -and object are `He` and `campaign`. - -The next step is to complete each subject and object token by adding their -dependencies to them. For example, in the sentence above, `his` and -`presidential` are dependent tokens for `campaign`. This is done using the -dependency parse tree, similar to verb dependencies as explained above. The -final result is (`He`, `began`, `his presidential campaign`) triple for -the example sentence above. - -The script performs this analysis for the entire text and prints the result. -For the `obama_wikipedia.txt` file, the result is the following: - -```sh -+------------------------------+------------+------------------------------+ -| Obama | received | national attention | -+------------------------------+------------+------------------------------+ -| He | began | his presidential campaign | -+------------------------------+------------+------------------------------+ -| he | won | sufficient delegates in the | -| | | Democratic Party primaries | -+------------------------------+------------+------------------------------+ -| He | defeated | Republican nominee John | -| | | McCain | -``` diff --git a/language/snippets/syntax_triples/main.py b/language/snippets/syntax_triples/main.py deleted file mode 100644 index bbe2386634ed..000000000000 --- a/language/snippets/syntax_triples/main.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/env python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This example finds subject-verb-object triples in a given piece of text using -the syntax analysis capabilities of Cloud Natural Language API. The triples are -printed to STDOUT. This can be considered as the first step towards an -information extraction task. - -Run the script on a file containing the text that you wish to analyze. 
-The text must be encoded in UTF8 or ASCII: - $ python main.py - -Try this on a sample text in the resources directory: - $ python main.py resources/obama_wikipedia.txt -""" - -import argparse -import sys -import textwrap - -import googleapiclient.discovery - - -def dependents(tokens, head_index): - """Returns an ordered list of the token indices of the dependents for - the given head.""" - # Create head->dependency index. - head_to_deps = {} - for i, token in enumerate(tokens): - head = token['dependencyEdge']['headTokenIndex'] - if i != head: - head_to_deps.setdefault(head, []).append(i) - return head_to_deps.get(head_index, ()) - - -def phrase_text_for_head(tokens, text, head_index): - """Returns the entire phrase containing the head token - and its dependents. - """ - begin, end = phrase_extent_for_head(tokens, head_index) - return text[begin:end] - - -def phrase_extent_for_head(tokens, head_index): - """Returns the begin and end offsets for the entire phrase - containing the head token and its dependents. - """ - begin = tokens[head_index]['text']['beginOffset'] - end = begin + len(tokens[head_index]['text']['content']) - for child in dependents(tokens, head_index): - child_begin, child_end = phrase_extent_for_head(tokens, child) - begin = min(begin, child_begin) - end = max(end, child_end) - return (begin, end) - - -def analyze_syntax(text): - """Use the NL API to analyze the given text string, and returns the - response from the API. Requests an encodingType that matches - the encoding used natively by Python. Raises an - errors.HTTPError if there is a connection problem. - """ - service = googleapiclient.discovery.build('language', 'v1beta1') - body = { - 'document': { - 'type': 'PLAIN_TEXT', - 'content': text, - }, - 'features': { - 'extract_syntax': True, - }, - 'encodingType': get_native_encoding_type(), - } - request = service.documents().annotateText(body=body) - return request.execute() - - -def get_native_encoding_type(): - """Returns the encoding type that matches Python's native strings.""" - if sys.maxunicode == 65535: - return 'UTF16' - else: - return 'UTF32' - - -def find_triples(tokens, - left_dependency_label='NSUBJ', - head_part_of_speech='VERB', - right_dependency_label='DOBJ'): - """Generator function that searches the given tokens - with the given part of speech tag, that have dependencies - with the given labels. For each such head found, yields a tuple - (left_dependent, head, right_dependent), where each element of the - tuple is an index into the tokens array. - """ - for head, token in enumerate(tokens): - if token['partOfSpeech']['tag'] == head_part_of_speech: - children = dependents(tokens, head) - left_deps = [] - right_deps = [] - for child in children: - child_token = tokens[child] - child_dep_label = child_token['dependencyEdge']['label'] - if child_dep_label == left_dependency_label: - left_deps.append(child) - elif child_dep_label == right_dependency_label: - right_deps.append(child) - for left_dep in left_deps: - for right_dep in right_deps: - yield (left_dep, head, right_dep) - - -def show_triple(tokens, text, triple): - """Prints the given triple (left, head, right). For left and right, - the entire phrase headed by each token is shown. For head, only - the head token itself is shown. - - """ - nsubj, verb, dobj = triple - - # Extract the text for each element of the triple. 
- nsubj_text = phrase_text_for_head(tokens, text, nsubj) - verb_text = tokens[verb]['text']['content'] - dobj_text = phrase_text_for_head(tokens, text, dobj) - - # Pretty-print the triple. - left = textwrap.wrap(nsubj_text, width=28) - mid = textwrap.wrap(verb_text, width=10) - right = textwrap.wrap(dobj_text, width=28) - print('+' + 30 * '-' + '+' + 12 * '-' + '+' + 30 * '-' + '+') - for l, m, r in zip(left, mid, right): - print('| {:<28s} | {:<10s} | {:<28s} |'.format( - l or '', m or '', r or '')) - - -def main(text_file): - # Extracts subject-verb-object triples from the given text file, - # and print each one. - - # Read the input file. - text = open(text_file, 'rb').read().decode('utf8') - - analysis = analyze_syntax(text) - tokens = analysis.get('tokens', []) - - for triple in find_triples(tokens): - show_triple(tokens, text, triple) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - 'text_file', - help='A file containing the document to process. ' - 'Should be encoded in UTF8 or ASCII') - args = parser.parse_args() - main(args.text_file) diff --git a/language/snippets/syntax_triples/main_test.py b/language/snippets/syntax_triples/main_test.py deleted file mode 100755 index 6aa87818e35b..000000000000 --- a/language/snippets/syntax_triples/main_test.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import re - -import main - -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') - - -def test_dependents(): - text = "I am eating a delicious banana" - analysis = main.analyze_syntax(text) - tokens = analysis.get('tokens', []) - assert [0, 1, 5] == main.dependents(tokens, 2) - assert [3, 4] == main.dependents(tokens, 5) - - -def test_phrase_text_for_head(): - text = "A small collection of words" - analysis = main.analyze_syntax(text) - tokens = analysis.get('tokens', []) - assert "words" == main.phrase_text_for_head(tokens, text, 4) - - -def test_find_triples(): - text = "President Obama won the noble prize" - analysis = main.analyze_syntax(text) - tokens = analysis.get('tokens', []) - triples = main.find_triples(tokens) - for triple in triples: - assert (1, 2, 5) == triple - - -def test_obama_example(capsys): - main.main(os.path.join(RESOURCES, 'obama_wikipedia.txt')) - stdout, _ = capsys.readouterr() - lines = stdout.split('\n') - assert re.match( - r'.*Obama\b.*\| received\b.*\| national attention\b', - lines[1]) diff --git a/language/snippets/syntax_triples/requirements.txt b/language/snippets/syntax_triples/requirements.txt deleted file mode 100644 index 5e9029185cdc..000000000000 --- a/language/snippets/syntax_triples/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -google-api-python-client==1.7.4 -google-auth==1.5.1 -google-auth-httplib2==0.0.3 diff --git a/language/snippets/syntax_triples/resources/obama_wikipedia.txt b/language/snippets/syntax_triples/resources/obama_wikipedia.txt deleted file mode 100644 index 1e89d4ab0818..000000000000 --- a/language/snippets/syntax_triples/resources/obama_wikipedia.txt +++ /dev/null @@ -1 +0,0 @@ -In 2004, Obama received national attention during his campaign to represent Illinois in the United States Senate with his victory in the March Democratic Party primary, his keynote address at the Democratic National Convention in July, and his election to the Senate in November. He began his presidential campaign in 2007 and, after a close primary campaign against Hillary Clinton in 2008, he won sufficient delegates in the Democratic Party primaries to receive the presidential nomination. He then defeated Republican nominee John McCain in the general election, and was inaugurated as president on January 20, 2009. Nine months after his inauguration, Obama was named the 2009 Nobel Peace Prize laureate. diff --git a/language/snippets/tutorial/README.rst b/language/snippets/tutorial/README.rst deleted file mode 100644 index 3f83c1a2c640..000000000000 --- a/language/snippets/tutorial/README.rst +++ /dev/null @@ -1,93 +0,0 @@ -.. This file is automatically generated. Do not edit this file directly. - -Google Cloud Natural Language Tutorial Python Samples -=============================================================================== - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/tutorial/README.rst - - -This directory contains samples for Google Cloud Natural Language Tutorial. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. - - - - -.. 
_Google Cloud Natural Language Tutorial: https://cloud.google.com/natural-language/docs/ - -Setup -------------------------------------------------------------------------------- - - -Authentication -++++++++++++++ - -This sample requires you to have authentication setup. Refer to the -`Authentication Getting Started Guide`_ for instructions on setting up -credentials for applications. - -.. _Authentication Getting Started Guide: - https://cloud.google.com/docs/authentication/getting-started - -Install Dependencies -++++++++++++++++++++ - -#. Clone python-docs-samples and change directory to the sample directory you want to use. - - .. code-block:: bash - - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git - -#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup - -#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. - - .. code-block:: bash - - $ virtualenv env - $ source env/bin/activate - -#. Install the dependencies needed to run the samples. - - .. code-block:: bash - - $ pip install -r requirements.txt - -.. _pip: https://pip.pypa.io/ -.. _virtualenv: https://virtualenv.pypa.io/ - -Samples -------------------------------------------------------------------------------- - -Language tutorial -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/tutorial/tutorial.py,language/tutorial/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python tutorial.py - - usage: tutorial.py [-h] movie_review_filename - - positional arguments: - movie_review_filename - The filename of the movie review you'd like to - analyze. - - optional arguments: - -h, --help show this help message and exit - - - - - -.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/language/snippets/tutorial/README.rst.in b/language/snippets/tutorial/README.rst.in deleted file mode 100644 index 945c701e510e..000000000000 --- a/language/snippets/tutorial/README.rst.in +++ /dev/null @@ -1,22 +0,0 @@ -# This file is used to generate README.rst - -product: - name: Google Cloud Natural Language Tutorial - short_name: Cloud Natural Language Tutorial - url: https://cloud.google.com/natural-language/docs/ - description: > - The `Google Cloud Natural Language API`_ provides natural language - understanding technologies to developers, including sentiment analysis, - entity recognition, and syntax analysis. This API is part of the larger - Cloud Machine Learning API. 
- -setup: -- auth -- install_deps - -samples: -- name: Language tutorial - file: tutorial.py - show_help: true - -folder: language/tutorial \ No newline at end of file diff --git a/language/snippets/tutorial/requirements.txt b/language/snippets/tutorial/requirements.txt deleted file mode 100644 index 5e9029185cdc..000000000000 --- a/language/snippets/tutorial/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -google-api-python-client==1.7.4 -google-auth==1.5.1 -google-auth-httplib2==0.0.3 diff --git a/language/snippets/tutorial/reviews/bladerunner-mixed.txt b/language/snippets/tutorial/reviews/bladerunner-mixed.txt deleted file mode 100644 index 3b520b65a8a7..000000000000 --- a/language/snippets/tutorial/reviews/bladerunner-mixed.txt +++ /dev/null @@ -1,19 +0,0 @@ -I really wanted to love 'Bladerunner' but ultimately I couldn't get -myself to appreciate it fully. However, you may like it if you're into -science fiction, especially if you're interested in the philosophical -exploration of what it means to be human or machine. Some of the gizmos -like the flying cars and the Vouight-Kampff machine (which seemed very -steampunk), were quite cool. - -I did find the plot pretty slow and but the dialogue and action sequences -were good. Unlike most science fiction films, this one was mostly quiet, and -not all that much happened, except during the last 15 minutes. I didn't -understand why a unicorn was in the movie. The visual effects were fantastic, -however, and the musical score and overall mood was quite interesting. -A futurist Los Angeles that was both highly polished and also falling apart -reminded me of 'Outland.' Certainly, the style of the film made up for -many of its pedantic plot holes. - -If you want your sci-fi to be lasers and spaceships, 'Bladerunner' may -disappoint you. But if you want it to make you think, this movie may -be worth the money. \ No newline at end of file diff --git a/language/snippets/tutorial/reviews/bladerunner-neg.txt b/language/snippets/tutorial/reviews/bladerunner-neg.txt deleted file mode 100644 index dbef76271d16..000000000000 --- a/language/snippets/tutorial/reviews/bladerunner-neg.txt +++ /dev/null @@ -1,3 +0,0 @@ -What was Hollywood thinking with this movie! I hated, -hated, hated it. BORING! I went afterwards and demanded my money back. -They refused. \ No newline at end of file diff --git a/language/snippets/tutorial/reviews/bladerunner-neutral.txt b/language/snippets/tutorial/reviews/bladerunner-neutral.txt deleted file mode 100644 index 60556e604be9..000000000000 --- a/language/snippets/tutorial/reviews/bladerunner-neutral.txt +++ /dev/null @@ -1,2 +0,0 @@ -I neither liked nor disliked this movie. Parts were interesting, but -overall I was left wanting more. The acting was pretty good. \ No newline at end of file diff --git a/language/snippets/tutorial/reviews/bladerunner-pos.txt b/language/snippets/tutorial/reviews/bladerunner-pos.txt deleted file mode 100644 index a7faf81570b3..000000000000 --- a/language/snippets/tutorial/reviews/bladerunner-pos.txt +++ /dev/null @@ -1,10 +0,0 @@ -`Bladerunner` is often touted as one of the best science fiction films ever -made. Indeed, it satisfies many of the requisites for good sci-fi: a future -world with flying cars and humanoid robots attempting to rebel against their -creators. But more than anything, `Bladerunner` is a fantastic exploration -of the nature of what it means to be human. If we create robots which can -think, will they become human? And if they do, what makes us unique? 
Indeed, -how can we be sure we're not human in any case? `Bladerunner` explored -these issues before such movies as `The Matrix,' and did so intelligently. -The visual effects and score by Vangelis set the mood. See this movie -in a dark theatre to appreciate it fully. Highly recommended! \ No newline at end of file diff --git a/language/snippets/tutorial/tutorial.py b/language/snippets/tutorial/tutorial.py deleted file mode 100644 index 5d14b223e780..000000000000 --- a/language/snippets/tutorial/tutorial.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2016 Google, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# [START full_tutorial_script] -# [START import_libraries] -import argparse -import io - -import googleapiclient.discovery -# [END import_libraries] - - -def print_sentiment(filename): - """Prints sentiment analysis on a given file contents.""" - # [START authenticating_to_the_api] - service = googleapiclient.discovery.build('language', 'v1') - # [END authenticating_to_the_api] - - # [START constructing_the_request] - with io.open(filename, 'r') as review_file: - review_file_contents = review_file.read() - - service_request = service.documents().analyzeSentiment( - body={ - 'document': { - 'type': 'PLAIN_TEXT', - 'content': review_file_contents, - } - } - ) - response = service_request.execute() - # [END constructing_the_request] - - # [START parsing_the_response] - score = response['documentSentiment']['score'] - magnitude = response['documentSentiment']['magnitude'] - - for n, sentence in enumerate(response['sentences']): - sentence_sentiment = sentence['sentiment']['score'] - print('Sentence {} has a sentiment score of {}'.format(n, - sentence_sentiment)) - - print('Overall Sentiment: score of {} with magnitude of {}'.format( - score, magnitude)) - # [END parsing_the_response] - - -# [START running_your_application] -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument( - 'movie_review_filename', - help='The filename of the movie review you\'d like to analyze.') - args = parser.parse_args() - print_sentiment(args.movie_review_filename) -# [END running_your_application] -# [END full_tutorial_script] diff --git a/language/snippets/tutorial/tutorial_test.py b/language/snippets/tutorial/tutorial_test.py deleted file mode 100644 index 065076fb4cc7..000000000000 --- a/language/snippets/tutorial/tutorial_test.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2016, Google, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
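The tutorial module deleted above drives the API through the generic ``googleapiclient`` discovery client. For comparison, here is a sketch of the same ``analyzeSentiment`` request using the dedicated ``google-cloud-language`` package, assuming the pre-2.0 client surface used elsewhere in this series:

.. code-block:: python

    from google.cloud import language
    from google.cloud.language import enums
    from google.cloud.language import types

    client = language.LanguageServiceClient()

    # build the same plain-text document the discovery client sent as JSON
    document = types.Document(
        content='I really wanted to love this movie.',
        type=enums.Document.Type.PLAIN_TEXT)

    sentiment = client.analyze_sentiment(document).document_sentiment
    print('Overall Sentiment: score of {} with magnitude of {}'.format(
        sentiment.score, sentiment.magnitude))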
-import re - -import tutorial - - -def test_neutral(capsys): - tutorial.print_sentiment('reviews/bladerunner-neutral.txt') - out, _ = capsys.readouterr() - assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) - assert re.search( - r'Overall Sentiment: score of -?[0-2]\.?[0-9]? with ' - r'magnitude of [0-1]\.?[0-9]?', out, re.I) - - -def test_pos(capsys): - tutorial.print_sentiment('reviews/bladerunner-pos.txt') - out, _ = capsys.readouterr() - assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) - assert re.search( - r'Overall Sentiment: score of [0-9]\.?[0-9]? with ' - r'magnitude of [0-9]\.?[0-9]?', out, re.I) - - -def test_neg(capsys): - tutorial.print_sentiment('reviews/bladerunner-neg.txt') - out, _ = capsys.readouterr() - assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) - assert re.search( - r'Overall Sentiment: score of -[0-9]\.?[0-9]? with ' - r'magnitude of [2-7]\.?[0-9]?', out, re.I) - - -def test_mixed(capsys): - tutorial.print_sentiment('reviews/bladerunner-mixed.txt') - out, _ = capsys.readouterr() - assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) - assert re.search( - r'Overall Sentiment: score of -?[0-9]\.?[0-9]? with ' - r'magnitude of [3-6]\.?[0-9]?', out, re.I) From 6ebffe8d8e7b4235e7a3b2b6774a18e46829669d Mon Sep 17 00:00:00 2001 From: Charles Engelke Date: Fri, 19 Oct 2018 15:21:41 -0700 Subject: [PATCH 127/323] Fixed name of model [(#1779)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1779) * Fixed name of model * update model ids --- language/snippets/automl/predict_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/automl/predict_test.py b/language/snippets/automl/predict_test.py index 6cf2c69a0e72..f511302d58ba 100644 --- a/language/snippets/automl/predict_test.py +++ b/language/snippets/automl/predict_test.py @@ -23,7 +23,7 @@ def test_predict(capsys): - model_id = "3472481026502981088" + model_id = "TCN3472481026502981088" automl_natural_language_predict.predict( project_id, compute_region, model_id, "resources/test.txt" ) From 9a806423379749c47fae991a6d8ef7b6d45ce336 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 20 Nov 2018 15:40:29 -0800 Subject: [PATCH 128/323] Auto-update dependencies. [(#1846)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1846) ACK, merging. 
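The regex assertions in the deleted test file above are deliberately loose: they accept any plausible score and magnitude instead of pinning exact floats, so the tests survive small model changes. A worked example of one assertion against a typical output line (the values are made up for illustration):

.. code-block:: python

    import re

    sample = 'Overall Sentiment: score of -0.5 with magnitude of 3.5'
    assert re.search(
        r'Overall Sentiment: score of -?[0-9]\.?[0-9]? with '
        r'magnitude of [3-6]\.?[0-9]?', sample, re.I)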
--- language/snippets/api/requirements.txt | 2 +- language/snippets/classify_text/requirements.txt | 4 ++-- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 5e9029185cdc..a1a63f75b4be 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.7.4 -google-auth==1.5.1 +google-auth==1.6.1 google-auth-httplib2==0.0.3 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index d045e22d00e3..330f8f988be3 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==1.0.2 -numpy==1.15.1 +google-cloud-language==1.1.0 +numpy==1.15.4 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index 2cbc37eb15b1..7029093e9515 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.2 +google-cloud-language==1.1.0 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 2cbc37eb15b1..7029093e9515 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.2 +google-cloud-language==1.1.0 From 2d77d1fece35bbab0526aaa7c0e17160579f972e Mon Sep 17 00:00:00 2001 From: Shahin Date: Tue, 4 Dec 2018 15:39:10 -0800 Subject: [PATCH 129/323] Moved the imports and region tags inside the functions [(#1891)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1891) * Moved the imports and region tags inside the functions * Removed the unnecessary imports * Added the missing import (six) to the functions * Removed the extra whitespaces * Changes based on Alix's comments. - Sample files no longer have input arguments - Input texts and uri's are hard coded - unit tests are modified accordingly * Remove extra whitespace * Removed extra whitespaces * Removed unused import * Removed the extra + signs --- language/snippets/cloud-client/v1/snippets.py | 174 +++++++++++------- .../snippets/cloud-client/v1/snippets_test.py | 43 ++--- 2 files changed, 120 insertions(+), 97 deletions(-) diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py index 826c28c54f1a..6ccfaf1627fb 100644 --- a/language/snippets/cloud-client/v1/snippets.py +++ b/language/snippets/cloud-client/v1/snippets.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2016 Google, Inc. +# Copyright 2018 Google, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -24,15 +24,16 @@ import argparse import sys -from google.cloud import language -from google.cloud.language import enums -from google.cloud.language import types -import six +def sentiment_text(): + # [START language_sentiment_text] + import six + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + + text = 'Hello, world!' 
-# [START language_sentiment_text] -def sentiment_text(text): - """Detects sentiment in the text.""" client = language.LanguageServiceClient() if isinstance(text, six.binary_type): @@ -51,12 +52,17 @@ def sentiment_text(text): print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) # [END language_python_migration_sentiment_text] -# [END language_sentiment_text] + # [END language_sentiment_text] + + +def sentiment_file(): + # [START language_sentiment_gcs] + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + gcs_uri = 'gs://cloud-samples-data/language/hello.txt' -# [START language_sentiment_gcs] -def sentiment_file(gcs_uri): - """Detects sentiment in the file located in Google Cloud Storage.""" client = language.LanguageServiceClient() # Instantiates a plain text document. @@ -72,12 +78,18 @@ def sentiment_file(gcs_uri): print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) -# [END language_sentiment_gcs] + # [END language_sentiment_gcs] + + +def entities_text(): + # [START language_entities_text] + import six + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + text = 'President Kennedy spoke at the White House.' -# [START language_entities_text] -def entities_text(text): - """Detects entities in the text.""" client = language.LanguageServiceClient() if isinstance(text, six.binary_type): @@ -105,12 +117,17 @@ def entities_text(text): print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) # [END language_python_migration_entities_text] -# [END language_entities_text] + # [END language_entities_text] -# [START language_entities_gcs] -def entities_file(gcs_uri): - """Detects entities in the file located in Google Cloud Storage.""" +def entities_file(): + # [START language_entities_gcs] + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + + gcs_uri = 'gs://cloud-samples-data/language/president.txt' + client = language.LanguageServiceClient() # Instantiates a plain text document. @@ -131,12 +148,18 @@ def entities_file(gcs_uri): print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) -# [END language_entities_gcs] + # [END language_entities_gcs] + +def syntax_text(): + # [START language_syntax_text] + import six + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + + text = 'President Kennedy spoke at the White House.' 
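# Aside on the metadata lookups above: entity.metadata is a plain
# mapping, so .get() with a '-' default keeps the printout working for
# entities that carry no Knowledge Graph data. The dict below is a
# hypothetical stand-in for entity.metadata:
metadata = {'mid': '/m/0d06m5'}
print(metadata.get('wikipedia_url', '-'))  # prints '-'
print(metadata.get('mid', '-'))            # prints '/m/0d06m5'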
-# [START language_syntax_text] -def syntax_text(text): - """Detects syntax in the text.""" client = language.LanguageServiceClient() if isinstance(text, six.binary_type): @@ -157,12 +180,17 @@ def syntax_text(text): print(u'{}: {}'.format(part_of_speech_tag.name, token.text.content)) # [END language_python_migration_syntax_text] -# [END language_syntax_text] + # [END language_syntax_text] + +def syntax_file(): + # [START language_syntax_gcs] + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + + gcs_uri = 'gs://cloud-samples-data/language/president.txt' -# [START language_syntax_gcs] -def syntax_file(gcs_uri): - """Detects syntax in the file located in Google Cloud Storage.""" client = language.LanguageServiceClient() # Instantiates a plain text document. @@ -178,12 +206,18 @@ def syntax_file(gcs_uri): part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag) print(u'{}: {}'.format(part_of_speech_tag.name, token.text.content)) -# [END language_syntax_gcs] + # [END language_syntax_gcs] + + +def entity_sentiment_text(): + # [START language_entity_sentiment_text] + import six + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + text = 'President Kennedy spoke at the White House.' -# [START language_entity_sentiment_text] -def entity_sentiment_text(text): - """Detects entity sentiment in the provided text.""" client = language.LanguageServiceClient() if isinstance(text, six.binary_type): @@ -211,12 +245,17 @@ def entity_sentiment_text(text): print(u' Type : {}'.format(mention.type)) print(u'Salience: {}'.format(entity.salience)) print(u'Sentiment: {}\n'.format(entity.sentiment)) -# [END language_entity_sentiment_text] + # [END language_entity_sentiment_text] + + +def entity_sentiment_file(): + # [START language_entity_sentiment_gcs] + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + gcs_uri = 'gs://cloud-samples-data/language/president.txt' -# [START language_entity_sentiment_gcs] -def entity_sentiment_file(gcs_uri): - """Detects entity sentiment in a Google Cloud Storage file.""" client = language.LanguageServiceClient() document = types.Document( @@ -240,12 +279,20 @@ def entity_sentiment_file(gcs_uri): print(u' Type : {}'.format(mention.type)) print(u'Salience: {}'.format(entity.salience)) print(u'Sentiment: {}\n'.format(entity.sentiment)) -# [END language_entity_sentiment_gcs] + # [END language_entity_sentiment_gcs] -# [START language_classify_text] -def classify_text(text): - """Classifies content categories of the provided text.""" +def classify_text(): + # [START language_classify_text] + import six + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + + text = 'Android is a mobile operating system developed by Google, ' \ + 'based on the Linux kernel and designed primarily for ' \ + 'touchscreen mobile devices such as smartphones and tablets.' 
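# Aside on the enums.PartOfSpeech.Tag(...) conversions above: the API
# returns each part-of-speech tag as an integer, and wrapping it in the
# enum recovers a readable name. A minimal sketch, assuming the pre-2.0
# google-cloud-language surface used in this file:
from google.cloud.language import enums

tag = enums.PartOfSpeech.Tag(6)
print(tag.name)  # NOUN, if 6 maps to NOUN in this release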
+ client = language.LanguageServiceClient() if isinstance(text, six.binary_type): @@ -261,14 +308,17 @@ def classify_text(text): print(u'=' * 20) print(u'{:<16}: {}'.format('name', category.name)) print(u'{:<16}: {}'.format('confidence', category.confidence)) -# [END language_classify_text] + # [END language_classify_text] + + +def classify_file(): + # [START language_classify_gcs] + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + gcs_uri = 'gs://cloud-samples-data/language/android.txt' -# [START language_classify_gcs] -def classify_file(gcs_uri): - """Classifies content categories of the text in a Google Cloud Storage - file. - """ client = language.LanguageServiceClient() document = types.Document( @@ -281,7 +331,7 @@ def classify_file(gcs_uri): print(u'=' * 20) print(u'{:<16}: {}'.format('name', category.name)) print(u'{:<16}: {}'.format('confidence', category.confidence)) -# [END language_classify_gcs] + # [END language_classify_gcs] if __name__ == '__main__': @@ -292,63 +342,53 @@ def classify_file(gcs_uri): classify_text_parser = subparsers.add_parser( 'classify-text', help=classify_text.__doc__) - classify_text_parser.add_argument('text') classify_text_parser = subparsers.add_parser( 'classify-file', help=classify_file.__doc__) - classify_text_parser.add_argument('gcs_uri') sentiment_entities_text_parser = subparsers.add_parser( 'sentiment-entities-text', help=entity_sentiment_text.__doc__) - sentiment_entities_text_parser.add_argument('text') sentiment_entities_file_parser = subparsers.add_parser( 'sentiment-entities-file', help=entity_sentiment_file.__doc__) - sentiment_entities_file_parser.add_argument('gcs_uri') sentiment_text_parser = subparsers.add_parser( 'sentiment-text', help=sentiment_text.__doc__) - sentiment_text_parser.add_argument('text') sentiment_file_parser = subparsers.add_parser( 'sentiment-file', help=sentiment_file.__doc__) - sentiment_file_parser.add_argument('gcs_uri') entities_text_parser = subparsers.add_parser( 'entities-text', help=entities_text.__doc__) - entities_text_parser.add_argument('text') entities_file_parser = subparsers.add_parser( 'entities-file', help=entities_file.__doc__) - entities_file_parser.add_argument('gcs_uri') syntax_text_parser = subparsers.add_parser( 'syntax-text', help=syntax_text.__doc__) - syntax_text_parser.add_argument('text') syntax_file_parser = subparsers.add_parser( 'syntax-file', help=syntax_file.__doc__) - syntax_file_parser.add_argument('gcs_uri') args = parser.parse_args() if args.command == 'sentiment-text': - sentiment_text(args.text) + sentiment_text() elif args.command == 'sentiment-file': - sentiment_file(args.gcs_uri) + sentiment_file() elif args.command == 'entities-text': - entities_text(args.text) + entities_text() elif args.command == 'entities-file': - entities_file(args.gcs_uri) + entities_file() elif args.command == 'syntax-text': - syntax_text(args.text) + syntax_text() elif args.command == 'syntax-file': - syntax_file(args.gcs_uri) + syntax_file() elif args.command == 'sentiment-entities-text': - entity_sentiment_text(args.text) + entity_sentiment_text() elif args.command == 'sentiment-entities-file': - entity_sentiment_file(args.gcs_uri) + entity_sentiment_file() elif args.command == 'classify-text': - classify_text(args.text) + classify_text() elif args.command == 'classify-file': - classify_file(args.gcs_uri) + classify_file() diff --git a/language/snippets/cloud-client/v1/snippets_test.py 
b/language/snippets/cloud-client/v1/snippets_test.py index 27fbee24d92d..ef09b1a17f72 100644 --- a/language/snippets/cloud-client/v1/snippets_test.py +++ b/language/snippets/cloud-client/v1/snippets_test.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2017 Google, Inc. +# Copyright 2018 Google, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,85 +13,68 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os - import snippets -BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] -TEST_FILE_URL = 'gs://{}/text.txt'.format(BUCKET) -LONG_TEST_FILE_URL = 'gs://{}/android_text.txt'.format(BUCKET) - def test_sentiment_text(capsys): - snippets.sentiment_text('No! God please, no!') + snippets.sentiment_text() out, _ = capsys.readouterr() assert 'Score: ' in out def test_sentiment_file(capsys): - snippets.sentiment_file(TEST_FILE_URL) + snippets.sentiment_file() out, _ = capsys.readouterr() assert 'Score: ' in out def test_entities_text(capsys): - snippets.entities_text('President Obama is speaking at the White House.') + snippets.entities_text() out, _ = capsys.readouterr() assert 'name' in out - assert ': Obama' in out + assert ': Kennedy' in out def test_entities_file(capsys): - snippets.entities_file(TEST_FILE_URL) + snippets.entities_file() out, _ = capsys.readouterr() assert 'name' in out - assert ': Obama' in out + assert ': Kennedy' in out def test_syntax_text(capsys): - snippets.syntax_text('President Obama is speaking at the White House.') + snippets.syntax_text() out, _ = capsys.readouterr() assert 'NOUN: President' in out def test_syntax_file(capsys): - snippets.syntax_file(TEST_FILE_URL) + snippets.syntax_file() out, _ = capsys.readouterr() assert 'NOUN: President' in out def test_sentiment_entities_text(capsys): - snippets.entity_sentiment_text( - 'President Obama is speaking at the White House.') + snippets.entity_sentiment_text() out, _ = capsys.readouterr() assert 'Content : White House' in out def test_sentiment_entities_file(capsys): - snippets.entity_sentiment_file(TEST_FILE_URL) + snippets.entity_sentiment_file() out, _ = capsys.readouterr() assert 'Content : White House' in out -def test_sentiment_entities_utf(capsys): - snippets.entity_sentiment_text( - 'foo→bar') - out, _ = capsys.readouterr() - assert 'Begin Offset : 4' in out - - def test_classify_text(capsys): - snippets.classify_text( - 'Android is a mobile operating system developed by Google, ' - 'based on the Linux kernel and designed primarily for touchscreen ' - 'mobile devices such as smartphones and tablets.') + snippets.classify_text() out, _ = capsys.readouterr() assert 'name' in out assert '/Computers & Electronics' in out def test_classify_file(capsys): - snippets.classify_file(LONG_TEST_FILE_URL) + snippets.classify_file() out, _ = capsys.readouterr() assert 'name' in out assert '/Computers & Electronics' in out From 5fb10cfa8c106d43a64252239c82a5f0f3466293 Mon Sep 17 00:00:00 2001 From: Andrew Ferlitsch Date: Thu, 6 Dec 2018 15:55:38 -0800 Subject: [PATCH 130/323] Fix decode [(#1911)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1911) * fix decode problem * fix decode * fix decode --- language/snippets/cloud-client/v1/snippets.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py index 6ccfaf1627fb..39712c8b10a7 
100644 --- a/language/snippets/cloud-client/v1/snippets.py +++ b/language/snippets/cloud-client/v1/snippets.py @@ -27,7 +27,6 @@ def sentiment_text(): # [START language_sentiment_text] - import six from google.cloud import language from google.cloud.language import enums from google.cloud.language import types @@ -36,8 +35,10 @@ def sentiment_text(): client = language.LanguageServiceClient() - if isinstance(text, six.binary_type): + try: text = text.decode('utf-8') + except AttributeError: + pass # Instantiates a plain text document. # [START language_python_migration_sentiment_text] From e4efe7d7046942f6821098c7de10cdd684be556e Mon Sep 17 00:00:00 2001 From: Noah Negrey Date: Fri, 7 Dec 2018 09:27:27 -0800 Subject: [PATCH 131/323] Update sample output [(#1893)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1893) * Update sample output * Update snippets.py * Update snippets.py --- language/snippets/cloud-client/v1/snippets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py index 39712c8b10a7..a95110a2b25c 100644 --- a/language/snippets/cloud-client/v1/snippets.py +++ b/language/snippets/cloud-client/v1/snippets.py @@ -113,10 +113,10 @@ def entities_text(): print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) print(u'{:<16}: {}'.format('type', entity_type.name)) - print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) + print(u'{:<16}: {}'.format('mid', entity.metadata.get('mid', '-'))) # [END language_python_migration_entities_text] # [END language_entities_text] @@ -145,10 +145,10 @@ def entities_file(): print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) print(u'{:<16}: {}'.format('type', entity_type.name)) - print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) + print(u'{:<16}: {}'.format('mid', entity.metadata.get('mid', '-'))) # [END language_entities_gcs] From e3d396bc2998c8c13bd581c6495fa715cfb116c0 Mon Sep 17 00:00:00 2001 From: DPEBot Date: Wed, 6 Feb 2019 12:06:35 -0800 Subject: [PATCH 132/323] Auto-update dependencies. [(#1980)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1980) * Auto-update dependencies. 
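The decode fix above trades the ``isinstance(text, six.binary_type)`` check for an EAFP guard, dropping the ``six`` dependency while staying correct on Python 3, where plain strings have no ``decode`` method. The idiom in isolation, on a hypothetical input:

.. code-block:: python

    text = b'Hello, world!'  # may arrive as bytes or as str

    try:
        text = text.decode('utf-8')
    except AttributeError:
        pass  # already a decoded string

    print(text)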
* Update requirements.txt * Update requirements.txt --- language/snippets/api/requirements.txt | 4 ++-- language/snippets/automl/requirements.txt | 2 +- language/snippets/classify_text/requirements.txt | 4 ++-- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index a1a63f75b4be..7e4359ce08d3 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.7.4 -google-auth==1.6.1 +google-api-python-client==1.7.8 +google-auth==1.6.2 google-auth-httplib2==0.0.3 diff --git a/language/snippets/automl/requirements.txt b/language/snippets/automl/requirements.txt index 9b692618364a..db96c59966cf 100644 --- a/language/snippets/automl/requirements.txt +++ b/language/snippets/automl/requirements.txt @@ -1 +1 @@ -google-cloud-automl==0.1.1 +google-cloud-automl==0.1.2 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 330f8f988be3..8c31e5719d38 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==1.1.0 -numpy==1.15.4 +google-cloud-language==1.1.1 +numpy==1.16.1 diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 7029093e9515..257f81db5dbf 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.1.0 +google-cloud-language==1.1.1 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index 7029093e9515..257f81db5dbf 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.1.0 +google-cloud-language==1.1.1 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 7029093e9515..257f81db5dbf 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.1.0 +google-cloud-language==1.1.1 From facb890aaf932161c190666b201ef3af7b6e76d2 Mon Sep 17 00:00:00 2001 From: Charles Engelke Date: Mon, 29 Apr 2019 16:44:43 -0700 Subject: [PATCH 133/323] Update requirements.txt [(#2128)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2128) --- language/snippets/automl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/automl/requirements.txt b/language/snippets/automl/requirements.txt index db96c59966cf..ebc8794cf08e 100644 --- a/language/snippets/automl/requirements.txt +++ b/language/snippets/automl/requirements.txt @@ -1 +1 @@ -google-cloud-automl==0.1.2 +google-cloud-automl==0.2.0 From f0c43d234dd09b4d0d25adb45778f5698c37934b Mon Sep 17 00:00:00 2001 From: Gus Class Date: Tue, 8 Oct 2019 09:53:32 -0700 Subject: [PATCH 134/323] Adds split updates for Firebase ... 
opencensus [(#2438)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2438) --- language/snippets/api/requirements.txt | 4 ++-- language/snippets/automl/requirements.txt | 2 +- language/snippets/classify_text/requirements.txt | 4 ++-- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 7e4359ce08d3..81808120b6ce 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.7.8 -google-auth==1.6.2 +google-api-python-client==1.7.11 +google-auth==1.6.3 google-auth-httplib2==0.0.3 diff --git a/language/snippets/automl/requirements.txt b/language/snippets/automl/requirements.txt index ebc8794cf08e..6693c2417082 100644 --- a/language/snippets/automl/requirements.txt +++ b/language/snippets/automl/requirements.txt @@ -1 +1 @@ -google-cloud-automl==0.2.0 +google-cloud-automl==0.5.0 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 8c31e5719d38..b5558c7ccc8c 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==1.1.1 -numpy==1.16.1 +google-cloud-language==1.3.0 +numpy==1.17.2 diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 257f81db5dbf..0c011f546e87 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.1.1 +google-cloud-language==1.3.0 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index 257f81db5dbf..0c011f546e87 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.1.1 +google-cloud-language==1.3.0 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 257f81db5dbf..0c011f546e87 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.1.1 +google-cloud-language==1.3.0 From d94a127d18f9ce306d611b620d90244c52ca92da Mon Sep 17 00:00:00 2001 From: Noah Negrey Date: Fri, 15 Nov 2019 15:15:24 -0700 Subject: [PATCH 135/323] Add Set Endpoint Samples [(#2497)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2497) * Add Set Endpoint Samples * Add additional test result option * Sample Request update * Add filter_ --- .../snippets/cloud-client/v1/set_endpoint.py | 40 +++++++++++++++++++ .../cloud-client/v1/set_endpoint_test.py | 22 ++++++++++ 2 files changed, 62 insertions(+) create mode 100644 language/snippets/cloud-client/v1/set_endpoint.py create mode 100644 language/snippets/cloud-client/v1/set_endpoint_test.py diff --git a/language/snippets/cloud-client/v1/set_endpoint.py b/language/snippets/cloud-client/v1/set_endpoint.py new file mode 100644 index 000000000000..abc6f180a523 --- /dev/null +++ b/language/snippets/cloud-client/v1/set_endpoint.py @@ -0,0 +1,40 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def set_endpoint(): + """Change your endpoint""" + # [START language_set_endpoint] + # Imports the Google Cloud client library + from google.cloud import language + + client_options = {'api_endpoint': 'eu-language.googleapis.com:443'} + + # Instantiates a client + client = language.LanguageServiceClient(client_options=client_options) + # [END language_set_endpoint] + + # The text to analyze + document = language.types.Document( + content='Hello, world!', + type=language.enums.Document.Type.PLAIN_TEXT) + + # Detects the sentiment of the text + sentiment = client.analyze_sentiment(document=document).document_sentiment + + print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) + + +if __name__ == '__main__': + set_endpoint() diff --git a/language/snippets/cloud-client/v1/set_endpoint_test.py b/language/snippets/cloud-client/v1/set_endpoint_test.py new file mode 100644 index 000000000000..7e124c36a93d --- /dev/null +++ b/language/snippets/cloud-client/v1/set_endpoint_test.py @@ -0,0 +1,22 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import set_endpoint + + +def test_set_endpoint(capsys): + set_endpoint.set_endpoint() + + out, _ = capsys.readouterr() + assert 'Sentiment' in out From ba3e28f80ffe8ea158df671eb4aa8f68adeac152 Mon Sep 17 00:00:00 2001 From: DPEBot Date: Fri, 20 Dec 2019 17:41:38 -0800 Subject: [PATCH 136/323] Auto-update dependencies. [(#2005)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2005) * Auto-update dependencies. * Revert update of appengine/flexible/datastore. 
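The endpoint override in the new sample above is passed as a plain dict; the client constructor also accepts a ``ClientOptions`` instance, which some readers find more explicit. A sketch of the same regional setup, assuming the pre-2.0 ``google-cloud-language`` surface used in the sample:

.. code-block:: python

    from google.api_core.client_options import ClientOptions
    from google.cloud import language

    options = ClientOptions(api_endpoint='eu-language.googleapis.com:443')
    client = language.LanguageServiceClient(client_options=options)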
* revert update of appengine/flexible/scipy * revert update of bigquery/bqml * revert update of bigquery/cloud-client * revert update of bigquery/datalab-migration * revert update of bigtable/quickstart * revert update of compute/api * revert update of container_registry/container_analysis * revert update of dataflow/run_template * revert update of datastore/cloud-ndb * revert update of dialogflow/cloud-client * revert update of dlp * revert update of functions/imagemagick * revert update of functions/ocr/app * revert update of healthcare/api-client/fhir * revert update of iam/api-client * revert update of iot/api-client/gcs_file_to_device * revert update of iot/api-client/mqtt_example * revert update of language/automl * revert update of run/image-processing * revert update of vision/automl * revert update testing/requirements.txt * revert update of vision/cloud-client/detect * revert update of vision/cloud-client/product_search * revert update of jobs/v2/api_client * revert update of jobs/v3/api_client * revert update of opencensus * revert update of translate/cloud-client * revert update to speech/cloud-client Co-authored-by: Kurtis Van Gent <31518063+kurtisvg@users.noreply.github.com> Co-authored-by: Doug Mahugh --- language/snippets/api/requirements.txt | 2 +- language/snippets/classify_text/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 81808120b6ce..0237dc05ff61 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.7.11 -google-auth==1.6.3 +google-auth==1.10.0 google-auth-httplib2==0.0.3 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index b5558c7ccc8c..8a441e71dc0e 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.17.2 +numpy==1.17.4 From 05c86f8b00eb8987c994756b6bd81eff1aa4c9a6 Mon Sep 17 00:00:00 2001 From: "Leah E. Cole" <6719667+leahecole@users.noreply.github.com> Date: Thu, 5 Mar 2020 14:22:12 -0800 Subject: [PATCH 137/323] chore(deps): update dependency google-auth to v1.11.2 [(#2724)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2724) Co-authored-by: Leah E. Cole <6719667+leahecole@users.noreply.github.com> --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 0237dc05ff61..c27ca15eca9f 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.7.11 -google-auth==1.10.0 +google-auth==1.11.2 google-auth-httplib2==0.0.3 From 35d7c2752917303a45d6da5071604d698bd10241 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 6 Mar 2020 19:04:23 +0100 Subject: [PATCH 138/323] Update dependency google-cloud-automl to v0.10.0 [(#3033)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3033) Co-authored-by: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Co-authored-by: Leah E. 
Cole <6719667+leahecole@users.noreply.github.com> --- language/snippets/automl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/automl/requirements.txt b/language/snippets/automl/requirements.txt index 6693c2417082..eb3be7610a1b 100644 --- a/language/snippets/automl/requirements.txt +++ b/language/snippets/automl/requirements.txt @@ -1 +1 @@ -google-cloud-automl==0.5.0 +google-cloud-automl==0.10.0 From bbc7d56e7202351f99e65367b431d73033485ff4 Mon Sep 17 00:00:00 2001 From: Jonathan Simon Date: Tue, 10 Mar 2020 12:48:04 -0700 Subject: [PATCH 139/323] Remove unused region_tag comment. [(#3075)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3075) --- .../snippets/generated-samples/v1/language_sentiment_text.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/language/snippets/generated-samples/v1/language_sentiment_text.py b/language/snippets/generated-samples/v1/language_sentiment_text.py index d99f5d09c3a6..10d17970df08 100644 --- a/language/snippets/generated-samples/v1/language_sentiment_text.py +++ b/language/snippets/generated-samples/v1/language_sentiment_text.py @@ -29,7 +29,6 @@ def sample_analyze_sentiment(content): - # [START language_sentiment_text_core] client = language_v1.LanguageServiceClient() @@ -46,8 +45,6 @@ def sample_analyze_sentiment(content): print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) - # [END language_sentiment_text_core] - # [END language_sentiment_text] From 7981a835e4bf94099d3bd36383119bcf262d112c Mon Sep 17 00:00:00 2001 From: Jonathan Simon Date: Tue, 10 Mar 2020 13:00:04 -0700 Subject: [PATCH 140/323] Remove Natural Language samples not included docs. [(#3074)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3074) Remove Natural Language samples that are no longer included in product documentation. The samples used for documentation are now located in https://github.com/googleapis/python-language/tree/master/samples/v1 and thus have replaced the samples in this repo. --- language/snippets/cloud-client/v1/README.rst | 56 +-- .../snippets/cloud-client/v1/README.rst.in | 2 - language/snippets/cloud-client/v1/snippets.py | 395 ------------------ .../snippets/cloud-client/v1/snippets_test.py | 80 ---- 4 files changed, 2 insertions(+), 531 deletions(-) delete mode 100644 language/snippets/cloud-client/v1/snippets.py delete mode 100644 language/snippets/cloud-client/v1/snippets_test.py diff --git a/language/snippets/cloud-client/v1/README.rst b/language/snippets/cloud-client/v1/README.rst index 97f79a34e6a8..e0d719464c57 100644 --- a/language/snippets/cloud-client/v1/README.rst +++ b/language/snippets/cloud-client/v1/README.rst @@ -53,7 +53,7 @@ Install Dependencies $ virtualenv env $ source env/bin/activate -#. Install the dependencies needed to run the samples. +#. Install the dependencies needed to run the sample. .. code-block:: bash @@ -62,7 +62,7 @@ Install Dependencies .. _pip: https://pip.pypa.io/ .. _virtualenv: https://virtualenv.pypa.io/ -Samples +Sample ------------------------------------------------------------------------------- Quickstart @@ -81,58 +81,6 @@ To run this sample: $ python quickstart.py -Snippets -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. 
image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/snippets.py,language/cloud-client/v1/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python snippets.py - - usage: snippets.py [-h] - {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} - ... - - This application demonstrates how to perform basic operations with the - Google Cloud Natural Language API - - For more information, the documentation at - https://cloud.google.com/natural-language/docs. - - positional arguments: - {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} - classify-text Classifies content categories of the provided text. - classify-file Classifies content categories of the text in a Google - Cloud Storage file. - sentiment-entities-text - Detects entity sentiment in the provided text. - sentiment-entities-file - Detects entity sentiment in a Google Cloud Storage - file. - sentiment-text Detects sentiment in the text. - sentiment-file Detects sentiment in the file located in Google Cloud - Storage. - entities-text Detects entities in the text. - entities-file Detects entities in the file located in Google Cloud - Storage. - syntax-text Detects syntax in the text. - syntax-file Detects syntax in the file located in Google Cloud - Storage. - - optional arguments: - -h, --help show this help message and exit - - - - - The client library ------------------------------------------------------------------------------- diff --git a/language/snippets/cloud-client/v1/README.rst.in b/language/snippets/cloud-client/v1/README.rst.in index 06b7ff3e1953..9bf38dbf9105 100644 --- a/language/snippets/cloud-client/v1/README.rst.in +++ b/language/snippets/cloud-client/v1/README.rst.in @@ -23,8 +23,6 @@ setup: samples: - name: Quickstart file: quickstart.py -- name: Snippets - file: snippets.py show_help: true cloud_client_library: true diff --git a/language/snippets/cloud-client/v1/snippets.py b/language/snippets/cloud-client/v1/snippets.py deleted file mode 100644 index a95110a2b25c..000000000000 --- a/language/snippets/cloud-client/v1/snippets.py +++ /dev/null @@ -1,395 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google, LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This application demonstrates how to perform basic operations with the -Google Cloud Natural Language API - -For more information, the documentation at -https://cloud.google.com/natural-language/docs. 
-""" - -import argparse -import sys - - -def sentiment_text(): - # [START language_sentiment_text] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - text = 'Hello, world!' - - client = language.LanguageServiceClient() - - try: - text = text.decode('utf-8') - except AttributeError: - pass - - # Instantiates a plain text document. - # [START language_python_migration_sentiment_text] - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects sentiment in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - sentiment = client.analyze_sentiment(document).document_sentiment - - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) - # [END language_python_migration_sentiment_text] - # [END language_sentiment_text] - - -def sentiment_file(): - # [START language_sentiment_gcs] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - gcs_uri = 'gs://cloud-samples-data/language/hello.txt' - - client = language.LanguageServiceClient() - - # Instantiates a plain text document. - # [START language_python_migration_document_gcs] - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - # [END language_python_migration_document_gcs] - - # Detects sentiment in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - sentiment = client.analyze_sentiment(document).document_sentiment - - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) - # [END language_sentiment_gcs] - - -def entities_text(): - # [START language_entities_text] - import six - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - text = 'President Kennedy spoke at the White House.' - - client = language.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - # Instantiates a plain text document. - # [START language_python_migration_entities_text] - # [START language_python_migration_document_text] - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) - # [END language_python_migration_document_text] - - # Detects entities in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - entities = client.analyze_entities(document).entities - - for entity in entities: - entity_type = enums.Entity.Type(entity.type) - print('=' * 20) - print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity_type.name)) - print(u'{:<16}: {}'.format('salience', entity.salience)) - print(u'{:<16}: {}'.format('wikipedia_url', - entity.metadata.get('wikipedia_url', '-'))) - print(u'{:<16}: {}'.format('mid', entity.metadata.get('mid', '-'))) - # [END language_python_migration_entities_text] - # [END language_entities_text] - - -def entities_file(): - # [START language_entities_gcs] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - gcs_uri = 'gs://cloud-samples-data/language/president.txt' - - client = language.LanguageServiceClient() - - # Instantiates a plain text document. - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects sentiment in the document. 
You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - entities = client.analyze_entities(document).entities - - for entity in entities: - entity_type = enums.Entity.Type(entity.type) - print('=' * 20) - print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity_type.name)) - print(u'{:<16}: {}'.format('salience', entity.salience)) - print(u'{:<16}: {}'.format('wikipedia_url', - entity.metadata.get('wikipedia_url', '-'))) - print(u'{:<16}: {}'.format('mid', entity.metadata.get('mid', '-'))) - # [END language_entities_gcs] - - -def syntax_text(): - # [START language_syntax_text] - import six - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - text = 'President Kennedy spoke at the White House.' - - client = language.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - # Instantiates a plain text document. - # [START language_python_migration_syntax_text] - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects syntax in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - tokens = client.analyze_syntax(document).tokens - - for token in tokens: - part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag) - print(u'{}: {}'.format(part_of_speech_tag.name, - token.text.content)) - # [END language_python_migration_syntax_text] - # [END language_syntax_text] - - -def syntax_file(): - # [START language_syntax_gcs] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - gcs_uri = 'gs://cloud-samples-data/language/president.txt' - - client = language.LanguageServiceClient() - - # Instantiates a plain text document. - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects syntax in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - tokens = client.analyze_syntax(document).tokens - - for token in tokens: - part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag) - print(u'{}: {}'.format(part_of_speech_tag.name, - token.text.content)) - # [END language_syntax_gcs] - - -def entity_sentiment_text(): - # [START language_entity_sentiment_text] - import six - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - text = 'President Kennedy spoke at the White House.' - - client = language.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - document = types.Document( - content=text.encode('utf-8'), - type=enums.Document.Type.PLAIN_TEXT) - - # Detect and send native Python encoding to receive correct word offsets. 
- encoding = enums.EncodingType.UTF32 - if sys.maxunicode == 65535: - encoding = enums.EncodingType.UTF16 - - result = client.analyze_entity_sentiment(document, encoding) - - for entity in result.entities: - print('Mentions: ') - print(u'Name: "{}"'.format(entity.name)) - for mention in entity.mentions: - print(u' Begin Offset : {}'.format(mention.text.begin_offset)) - print(u' Content : {}'.format(mention.text.content)) - print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(u' Sentiment : {}'.format(mention.sentiment.score)) - print(u' Type : {}'.format(mention.type)) - print(u'Salience: {}'.format(entity.salience)) - print(u'Sentiment: {}\n'.format(entity.sentiment)) - # [END language_entity_sentiment_text] - - -def entity_sentiment_file(): - # [START language_entity_sentiment_gcs] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - gcs_uri = 'gs://cloud-samples-data/language/president.txt' - - client = language.LanguageServiceClient() - - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detect and send native Python encoding to receive correct word offsets. - encoding = enums.EncodingType.UTF32 - if sys.maxunicode == 65535: - encoding = enums.EncodingType.UTF16 - - result = client.analyze_entity_sentiment(document, encoding) - - for entity in result.entities: - print(u'Name: "{}"'.format(entity.name)) - for mention in entity.mentions: - print(u' Begin Offset : {}'.format(mention.text.begin_offset)) - print(u' Content : {}'.format(mention.text.content)) - print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(u' Sentiment : {}'.format(mention.sentiment.score)) - print(u' Type : {}'.format(mention.type)) - print(u'Salience: {}'.format(entity.salience)) - print(u'Sentiment: {}\n'.format(entity.sentiment)) - # [END language_entity_sentiment_gcs] - - -def classify_text(): - # [START language_classify_text] - import six - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - text = 'Android is a mobile operating system developed by Google, ' \ - 'based on the Linux kernel and designed primarily for ' \ - 'touchscreen mobile devices such as smartphones and tablets.' 
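# Why the sys.maxunicode check above matters: begin offsets are counted
# in code units of the requested encoding, and a character outside the
# Basic Multilingual Plane occupies one UTF-32 unit but two UTF-16
# units (a surrogate pair):
s = u'a\U0001F600b'  # 'a', an emoji outside the BMP, 'b'
print(len(s.encode('utf-32-le')) // 4)  # 3 code units
print(len(s.encode('utf-16-le')) // 2)  # 4 code units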
- - client = language.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - document = types.Document( - content=text.encode('utf-8'), - type=enums.Document.Type.PLAIN_TEXT) - - categories = client.classify_text(document).categories - - for category in categories: - print(u'=' * 20) - print(u'{:<16}: {}'.format('name', category.name)) - print(u'{:<16}: {}'.format('confidence', category.confidence)) - # [END language_classify_text] - - -def classify_file(): - # [START language_classify_gcs] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - gcs_uri = 'gs://cloud-samples-data/language/android.txt' - - client = language.LanguageServiceClient() - - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - categories = client.classify_text(document).categories - - for category in categories: - print(u'=' * 20) - print(u'{:<16}: {}'.format('name', category.name)) - print(u'{:<16}: {}'.format('confidence', category.confidence)) - # [END language_classify_gcs] - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - subparsers = parser.add_subparsers(dest='command') - - classify_text_parser = subparsers.add_parser( - 'classify-text', help=classify_text.__doc__) - - classify_text_parser = subparsers.add_parser( - 'classify-file', help=classify_file.__doc__) - - sentiment_entities_text_parser = subparsers.add_parser( - 'sentiment-entities-text', help=entity_sentiment_text.__doc__) - - sentiment_entities_file_parser = subparsers.add_parser( - 'sentiment-entities-file', help=entity_sentiment_file.__doc__) - - sentiment_text_parser = subparsers.add_parser( - 'sentiment-text', help=sentiment_text.__doc__) - - sentiment_file_parser = subparsers.add_parser( - 'sentiment-file', help=sentiment_file.__doc__) - - entities_text_parser = subparsers.add_parser( - 'entities-text', help=entities_text.__doc__) - - entities_file_parser = subparsers.add_parser( - 'entities-file', help=entities_file.__doc__) - - syntax_text_parser = subparsers.add_parser( - 'syntax-text', help=syntax_text.__doc__) - - syntax_file_parser = subparsers.add_parser( - 'syntax-file', help=syntax_file.__doc__) - - args = parser.parse_args() - - if args.command == 'sentiment-text': - sentiment_text() - elif args.command == 'sentiment-file': - sentiment_file() - elif args.command == 'entities-text': - entities_text() - elif args.command == 'entities-file': - entities_file() - elif args.command == 'syntax-text': - syntax_text() - elif args.command == 'syntax-file': - syntax_file() - elif args.command == 'sentiment-entities-text': - entity_sentiment_text() - elif args.command == 'sentiment-entities-file': - entity_sentiment_file() - elif args.command == 'classify-text': - classify_text() - elif args.command == 'classify-file': - classify_file() diff --git a/language/snippets/cloud-client/v1/snippets_test.py b/language/snippets/cloud-client/v1/snippets_test.py deleted file mode 100644 index ef09b1a17f72..000000000000 --- a/language/snippets/cloud-client/v1/snippets_test.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2018 Google, LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import snippets
-
-
-def test_sentiment_text(capsys):
-    snippets.sentiment_text()
-    out, _ = capsys.readouterr()
-    assert 'Score: ' in out
-
-
-def test_sentiment_file(capsys):
-    snippets.sentiment_file()
-    out, _ = capsys.readouterr()
-    assert 'Score: ' in out
-
-
-def test_entities_text(capsys):
-    snippets.entities_text()
-    out, _ = capsys.readouterr()
-    assert 'name' in out
-    assert ': Kennedy' in out
-
-
-def test_entities_file(capsys):
-    snippets.entities_file()
-    out, _ = capsys.readouterr()
-    assert 'name' in out
-    assert ': Kennedy' in out
-
-
-def test_syntax_text(capsys):
-    snippets.syntax_text()
-    out, _ = capsys.readouterr()
-    assert 'NOUN: President' in out
-
-
-def test_syntax_file(capsys):
-    snippets.syntax_file()
-    out, _ = capsys.readouterr()
-    assert 'NOUN: President' in out
-
-
-def test_sentiment_entities_text(capsys):
-    snippets.entity_sentiment_text()
-    out, _ = capsys.readouterr()
-    assert 'Content : White House' in out
-
-
-def test_sentiment_entities_file(capsys):
-    snippets.entity_sentiment_file()
-    out, _ = capsys.readouterr()
-    assert 'Content : White House' in out
-
-
-def test_classify_text(capsys):
-    snippets.classify_text()
-    out, _ = capsys.readouterr()
-    assert 'name' in out
-    assert '/Computers & Electronics' in out
-
-
-def test_classify_file(capsys):
-    snippets.classify_file()
-    out, _ = capsys.readouterr()
-    assert 'name' in out
-    assert '/Computers & Electronics' in out

From a907125558a611059244b9dd6c287cd9420f1222 Mon Sep 17 00:00:00 2001
From: Noah Negrey
Date: Fri, 13 Mar 2020 14:24:50 -0600
Subject: [PATCH 141/323] language: fix old automl tests
 [(#3089)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3089)

Co-authored-by: Leah E. Cole <6719667+leahecole@users.noreply.github.com>
---
 language/snippets/automl/dataset_test.py | 30 +++++++++++++++++-------
 1 file changed, 22 insertions(+), 8 deletions(-)

diff --git a/language/snippets/automl/dataset_test.py b/language/snippets/automl/dataset_test.py
index fe68579fc60e..94e5e5d062c8 100644
--- a/language/snippets/automl/dataset_test.py
+++ b/language/snippets/automl/dataset_test.py
@@ -35,15 +35,7 @@ def test_dataset_create_import_delete(capsys):
     out, _ = capsys.readouterr()
     create_dataset_output = out.splitlines()
     assert "Dataset id: " in create_dataset_output[1]
-
-    # import data
     dataset_id = create_dataset_output[1].split()[2]
-    data = "gs://{}-lcm/happiness.csv".format(project_id)
-    automl_natural_language_dataset.import_data(
-        project_id, compute_region, dataset_id, data
-    )
-    out, _ = capsys.readouterr()
-    assert "Data imported." in out

     # delete dataset
     automl_natural_language_dataset.delete_dataset(
@@ -53,6 +45,28 @@ def test_dataset_create_import_delete(capsys):
     assert "Dataset deleted." in out


+def test_import_data(capsys):
+    # As importing a dataset can take a long time and only four operations can
+    # be run on a dataset at once, try to import into a nonexistent dataset and
+    # confirm that the dataset was not found, but other elements of the request
+    # were valid.
+    try:
+        data = "gs://{}-lcm/happiness.csv".format(project_id)
+        automl_natural_language_dataset.import_data(
+            project_id, compute_region, "TEN0000000000000000000", data
+        )
+        out, _ = capsys.readouterr()
+        assert (
+            "Dataset doesn't exist or is inaccessible for use with AutoMl."
+            in out
+        )
+    except Exception as e:
+        assert (
+            "Dataset doesn't exist or is inaccessible for use with AutoMl."
+            in e.message
+        )
+
+
 def test_dataset_list_get(capsys):
     # list datasets
     automl_natural_language_dataset.list_datasets(

From bff0652f479f51a623dc8eebfea2c1e66719ba88 Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Mon, 30 Mar 2020 21:10:18 +0200
Subject: [PATCH 142/323] chore(deps): update dependency numpy to v1.18.2
 [(#3181)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3181)

---
 language/snippets/classify_text/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt
index 8a441e71dc0e..bdbf1b3a587e 100644
--- a/language/snippets/classify_text/requirements.txt
+++ b/language/snippets/classify_text/requirements.txt
@@ -1,2 +1,2 @@
 google-cloud-language==1.3.0
-numpy==1.17.4
+numpy==1.18.2

From e2e92ec993449ca8b07894bc09cb83016ac5697b Mon Sep 17 00:00:00 2001
From: Kurtis Van Gent <31518063+kurtisvg@users.noreply.github.com>
Date: Wed, 1 Apr 2020 19:11:50 -0700
Subject: [PATCH 143/323] Simplify noxfile setup. [(#2806)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2806)

* chore(deps): update dependency requests to v2.23.0
* Simplify noxfile and add version control.
* Configure appengine/standard to only test Python 2.7.
* Update Kokoro configs to match noxfile.
* Add requirements-test to each folder.
* Remove Py2 versions from everything except appengine/standard.
* Remove conftest.py.
* Remove appengine/standard/conftest.py
* Remove 'no-sucess-flaky-report' from pytest.ini.
* Add GAE SDK back to appengine/standard tests.
* Fix typo.
* Roll pytest to python 2 version.
* Add a bunch of testing requirements.
* Remove typo.
* Add appengine lib directory back in.
* Add some additional requirements.
* Fix issue with flake8 args.
* Even more requirements.
* Readd appengine conftest.py.
* Add a few more requirements.
* Even more Appengine requirements.
* Add webtest for appengine/standard/mailgun.
* Add some additional requirements.
* Add workaround for issue with mailjet-rest.
* Add responses for appengine/standard/mailjet.
Co-authored-by: Renovate Bot --- language/snippets/api/requirements-test.txt | 1 + language/snippets/automl/requirements-test.txt | 1 + language/snippets/classify_text/requirements-test.txt | 1 + language/snippets/cloud-client/v1/requirements-test.txt | 1 + language/snippets/generated-samples/v1/requirements-test.txt | 1 + language/snippets/sentiment/requirements-test.txt | 1 + 6 files changed, 6 insertions(+) create mode 100644 language/snippets/api/requirements-test.txt create mode 100644 language/snippets/automl/requirements-test.txt create mode 100644 language/snippets/classify_text/requirements-test.txt create mode 100644 language/snippets/cloud-client/v1/requirements-test.txt create mode 100644 language/snippets/generated-samples/v1/requirements-test.txt create mode 100644 language/snippets/sentiment/requirements-test.txt diff --git a/language/snippets/api/requirements-test.txt b/language/snippets/api/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/language/snippets/api/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/language/snippets/automl/requirements-test.txt b/language/snippets/automl/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/language/snippets/automl/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/language/snippets/classify_text/requirements-test.txt b/language/snippets/classify_text/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/language/snippets/classify_text/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/language/snippets/cloud-client/v1/requirements-test.txt b/language/snippets/cloud-client/v1/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/language/snippets/cloud-client/v1/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/language/snippets/generated-samples/v1/requirements-test.txt b/language/snippets/generated-samples/v1/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/language/snippets/generated-samples/v1/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/language/snippets/sentiment/requirements-test.txt b/language/snippets/sentiment/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/language/snippets/sentiment/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 From 9049df208546883634004d85a2c5ec0cb87a1505 Mon Sep 17 00:00:00 2001 From: Jonathan Simon Date: Fri, 10 Apr 2020 14:14:14 -0700 Subject: [PATCH 144/323] Remove Language sample unused region_tag comments [(#3078)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3078) Co-authored-by: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Co-authored-by: Leah E. Cole <6719667+leahecole@users.noreply.github.com> --- language/snippets/classify_text/classify_text_tutorial.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/language/snippets/classify_text/classify_text_tutorial.py b/language/snippets/classify_text/classify_text_tutorial.py index 2ce388cff09a..d193e62e367b 100644 --- a/language/snippets/classify_text/classify_text_tutorial.py +++ b/language/snippets/classify_text/classify_text_tutorial.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# [START language_classify_text_tutorial] """Using the classify_text method to find content categories of text files, Then use the content category labels to compare text similarity. @@ -94,7 +93,6 @@ def index(path, index_file): # [END language_classify_text_tutorial_index] -# [START language_classify_text_tutorial_split_labels] def split_labels(categories): """The category labels are of the form "/a/b/c" up to three levels, for example "/Computers & Electronics/Software", and these labels @@ -121,10 +119,8 @@ def split_labels(categories): _categories[label] = confidence return _categories -# [END language_classify_text_tutorial_split_labels] -# [START language_classify_text_tutorial_similarity] def similarity(categories1, categories2): """Cosine similarity of the categories treated as sparse vectors.""" categories1 = split_labels(categories1) @@ -143,7 +139,6 @@ def similarity(categories1, categories2): dot += confidence * categories2.get(label, 0.0) return dot / (norm1 * norm2) -# [END language_classify_text_tutorial_similarity] # [START language_classify_text_tutorial_query] @@ -255,4 +250,3 @@ def query_category(index_file, category_string, n_top=3): query(args.index_file, args.text) if args.command == 'query-category': query_category(args.index_file, args.category) -# [END language_classify_text_tutorial] From 76b7a4d0d488a963085c4559b13122405026eaf1 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 17 Apr 2020 03:09:45 +0200 Subject: [PATCH 145/323] Update dependency google-auth to v1.14.0 [(#3148)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3148) Co-authored-by: Leah E. Cole <6719667+leahecole@users.noreply.github.com> --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index c27ca15eca9f..04c20c7a6612 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.7.11 -google-auth==1.11.2 +google-auth==1.14.0 google-auth-httplib2==0.0.3 From 614fc0ba401d993aa3ae2a2045b264b073a0987a Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 17 Apr 2020 03:44:09 +0200 Subject: [PATCH 146/323] chore(deps): update dependency google-api-python-client to v1.8.0 [(#3100)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3100) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-api-python-client](https://togithub.com/google/google-api-python-client) | minor | `==1.7.11` -> `==1.8.0` | --- ### Release Notes
google/google-api-python-client ### [`v1.8.0`](https://togithub.com/google/google-api-python-client/releases/v1.8.0) [Compare Source](https://togithub.com/google/google-api-python-client/compare/v1.7.12...v1.8.0) Release to support API endpoint override. New Features - Add api endpoint override. ([#​829](https://togithub.com/googleapis/google-api-python-client/pull/829)) Implementation Changes - Don't set http.redirect_codes if the attr doesn't exist and allow more httplib2 versions. ([#​841](https://togithub.com/googleapis/google-api-python-client/pull/841)) ### [`v1.7.12`](https://togithub.com/google/google-api-python-client/releases/v1.7.12) [Compare Source](https://togithub.com/google/google-api-python-client/compare/v1.7.11...v1.7.12) Bugfix release Implementation Changes - Look for field 'detail' in error message. ([#​739](https://togithub.com/googleapis/google-api-python-client/pull/739)) - Exclude 308s from httplib2 redirect codes list ([#​813](https://togithub.com/googleapis/google-api-python-client/pull/813)) Documentation - Remove oauth2client from docs ([#​738](https://togithub.com/googleapis/google-api-python-client/pull/738)) - Fix typo. ([#​745](https://togithub.com/googleapis/google-api-python-client/pull/745)) - Remove compatibility badges. ([#​746](https://togithub.com/googleapis/google-api-python-client/pull/746)) - Fix TypeError: search_analytics_api_sample.py [#​732](https://togithub.com/google/google-api-python-client/issues/732) ([#​742](https://togithub.com/googleapis/google-api-python-client/pull/742)) - Correct response access ([#​750](https://togithub.com/googleapis/google-api-python-client/pull/750)) - Fix link to API explorer ([#​760](https://togithub.com/googleapis/google-api-python-client/pull/760)) - Fix argument typo in oauth2 code example ([#​763](https://togithub.com/googleapis/google-api-python-client/pull/763)) - Recommend install with virtualenv ([#​768](https://togithub.com/googleapis/google-api-python-client/pull/768)) - Fix capitalization in docs/README.md ([#​770](https://togithub.com/googleapis/google-api-python-client/pull/770)) - Remove compatibility badges ([#​796](https://togithub.com/googleapis/google-api-python-client/pull/796)) - Remove mentions of pycrypto ([#​799](https://togithub.com/googleapis/google-api-python-client/pull/799)) - Fix typo in model.py - Add note about Google Ads llibrary ([#​814](https://togithub.com/googleapis/google-api-python-client/pull/814)) Internal / Testing Changes - Blacken ([#​772](https://togithub.com/googleapis/google-api-python-client/pull/722)) - Move kokoro configs ([#​832](https://togithub.com/googleapis/google-api-python-client/pull/832))
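The "Add api endpoint override" entry (#829) is the one feature in 1.8.0 that touches how these samples build a client. A minimal sketch of what that override could look like against the Natural Language REST surface, assuming the `client_options` keyword that `googleapiclient.discovery.build` accepts as of this release; the endpoint URL below is a placeholder for illustration, not a documented Natural Language regional endpoint:

```python
# Minimal sketch of the 1.8.0 "api endpoint override" (#829), assuming the
# client_options keyword on googleapiclient.discovery.build. The endpoint URL
# is a placeholder; substitute a real service endpoint if one applies.
from googleapiclient.discovery import build

service = build(
    "language",
    "v1",
    client_options={"api_endpoint": "https://example-language.googleapis.com"},
)

# Requests built from `service` are now sent to the overridden endpoint
# instead of the default one from the discovery document.
body = {"document": {"type": "PLAIN_TEXT", "content": "Hello, world!"}}
response = service.documents().analyzeSentiment(body=body).execute()
print(response["documentSentiment"])
```

Because the override lives entirely in `client_options`, the request-building code in the existing snippets would stay unchanged when pointed at a different endpoint.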
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 04c20c7a6612..46afe12bb258 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.7.11 +google-api-python-client==1.8.0 google-auth==1.14.0 google-auth-httplib2==0.0.3 From 09019d52347a25bb59d210349259aa41f56d6084 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 20 Apr 2020 18:55:50 +0200 Subject: [PATCH 147/323] chore(deps): update dependency numpy to v1.18.3 [(#3441)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3441) --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index bdbf1b3a587e..80d612f8c0d7 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.18.2 +numpy==1.18.3 From ae410f211d39c1f92ac11caa1990a5941a566fb9 Mon Sep 17 00:00:00 2001 From: Anthony Date: Thu, 23 Apr 2020 21:14:40 -0700 Subject: [PATCH 148/323] remove samples/tests that aren't on devsite, incl. localized docs [(#3423)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3423) Co-authored-by: Takashi Matsuo --- .../automl/automl_natural_language_dataset.py | 297 ------------- .../automl/automl_natural_language_model.py | 392 ------------------ .../automl/automl_natural_language_predict.py | 85 ---- language/snippets/automl/dataset_test.py | 85 ---- language/snippets/automl/model_test.py | 82 ---- language/snippets/automl/predict_test.py | 31 -- 6 files changed, 972 deletions(-) delete mode 100755 language/snippets/automl/automl_natural_language_dataset.py delete mode 100755 language/snippets/automl/automl_natural_language_model.py delete mode 100755 language/snippets/automl/automl_natural_language_predict.py delete mode 100644 language/snippets/automl/dataset_test.py delete mode 100644 language/snippets/automl/model_test.py delete mode 100644 language/snippets/automl/predict_test.py diff --git a/language/snippets/automl/automl_natural_language_dataset.py b/language/snippets/automl/automl_natural_language_dataset.py deleted file mode 100755 index df77d54268d4..000000000000 --- a/language/snippets/automl/automl_natural_language_dataset.py +++ /dev/null @@ -1,297 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This application demonstrates how to perform basic operations on Dataset -with the Google AutoML Natural Language API. - -For more information, see the tutorial page at -https://cloud.google.com/natural-language/automl/docs/ -""" - -import argparse -import os - - -def create_dataset(project_id, compute_region, dataset_name, multilabel=False): - """Create a dataset.""" - # [START automl_language_create_dataset] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # dataset_name = 'DATASET_NAME_HERE' - # multilabel = True for multilabel or False for multiclass - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, compute_region) - - # Classification type is assigned based on multilabel value. - classification_type = "MULTICLASS" - if multilabel: - classification_type = "MULTILABEL" - - # Specify the text classification type for the dataset. - dataset_metadata = {"classification_type": classification_type} - - # Set dataset name and metadata. - my_dataset = { - "display_name": dataset_name, - "text_classification_dataset_metadata": dataset_metadata, - } - - # Create a dataset with the dataset metadata in the region. - dataset = client.create_dataset(project_location, my_dataset) - - # Display the dataset information. - print("Dataset name: {}".format(dataset.name)) - print("Dataset id: {}".format(dataset.name.split("/")[-1])) - print("Dataset display name: {}".format(dataset.display_name)) - print("Text classification dataset metadata:") - print("\t{}".format(dataset.text_classification_dataset_metadata)) - print("Dataset example count: {}".format(dataset.example_count)) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) - - # [END automl_language_create_dataset] - - -def list_datasets(project_id, compute_region, filter_): - """List all datasets.""" - # [START automl_language_list_datasets] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # filter_ = 'filter expression here' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, compute_region) - - # List all the datasets available in the region by applying filter. - response = client.list_datasets(project_location, filter_) - - print("List of datasets:") - for dataset in response: - # Display the dataset information. 
- print("Dataset name: {}".format(dataset.name)) - print("Dataset id: {}".format(dataset.name.split("/")[-1])) - print("Dataset display name: {}".format(dataset.display_name)) - print("Text classification dataset metadata:") - print("\t{}".format(dataset.text_classification_dataset_metadata)) - print("Dataset example count: {}".format(dataset.example_count)) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) - - # [END automl_language_list_datasets] - - -def get_dataset(project_id, compute_region, dataset_id): - """Get the dataset.""" - # [START automl_language_get_dataset] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # dataset_id = 'DATASET_ID_HERE' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the dataset - dataset_full_id = client.dataset_path( - project_id, compute_region, dataset_id - ) - - # Get complete detail of the dataset. - dataset = client.get_dataset(dataset_full_id) - - # Display the dataset information. - print("Dataset name: {}".format(dataset.name)) - print("Dataset id: {}".format(dataset.name.split("/")[-1])) - print("Dataset display name: {}".format(dataset.display_name)) - print("Text classification dataset metadata:") - print("\t{}".format(dataset.text_classification_dataset_metadata)) - print("Dataset example count: {}".format(dataset.example_count)) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) - - # [END automl_language_get_dataset] - - -def import_data(project_id, compute_region, dataset_id, path): - """Import labelled items.""" - # [START automl_language_import_data] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # dataset_id = 'DATASET_ID_HERE' - # path = 'gs://path/to/file.csv' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the dataset. - dataset_full_id = client.dataset_path( - project_id, compute_region, dataset_id - ) - - # Get the multiple Google Cloud Storage URIs. - input_uris = path.split(",") - input_config = {"gcs_source": {"input_uris": input_uris}} - - # Import the dataset from the input URI. - response = client.import_data(dataset_full_id, input_config) - - print("Processing import...") - # synchronous check of operation status. - print("Data imported. {}".format(response.result())) - - # [END automl_language_import_data] - - -def export_data(project_id, compute_region, dataset_id, output_uri): - """Export a dataset to a Google Cloud Storage bucket.""" - # [START automl_language_export_data] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # dataset_id = 'DATASET_ID_HERE' - # output_uri: 'gs://location/to/export/data' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the dataset. - dataset_full_id = client.dataset_path( - project_id, compute_region, dataset_id - ) - - # Set the output URI - output_config = {"gcs_destination": {"output_uri_prefix": output_uri}} - - # Export the data to the output URI. 
- response = client.export_data(dataset_full_id, output_config) - - print("Processing export...") - # synchronous check of operation status. - print("Data exported. {}".format(response.result())) - - # [END automl_language_export_data] - - -def delete_dataset(project_id, compute_region, dataset_id): - """Delete a dataset.""" - # [START automl_language_delete_dataset] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # dataset_id = 'DATASET_ID_HERE' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the dataset. - dataset_full_id = client.dataset_path( - project_id, compute_region, dataset_id - ) - - # Delete a dataset. - response = client.delete_dataset(dataset_full_id) - - # synchronous check of operation status. - print("Dataset deleted. {}".format(response.result())) - - # [END automl_language_delete_dataset] - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - subparsers = parser.add_subparsers(dest="command") - - create_dataset_parser = subparsers.add_parser( - "create_dataset", help=create_dataset.__doc__ - ) - create_dataset_parser.add_argument("dataset_name") - create_dataset_parser.add_argument( - "multilabel", nargs="?", choices=["False", "True"], default="False" - ) - - list_datasets_parser = subparsers.add_parser( - "list_datasets", help=list_datasets.__doc__ - ) - list_datasets_parser.add_argument( - "filter_", nargs="?", default="text_classification_dataset_metadata:*" - ) - - get_dataset_parser = subparsers.add_parser( - "get_dataset", help=get_dataset.__doc__ - ) - get_dataset_parser.add_argument("dataset_id") - - import_data_parser = subparsers.add_parser( - "import_data", help=import_data.__doc__ - ) - import_data_parser.add_argument("dataset_id") - import_data_parser.add_argument("path") - - export_data_parser = subparsers.add_parser( - "export_data", help=export_data.__doc__ - ) - export_data_parser.add_argument("dataset_id") - export_data_parser.add_argument("output_uri") - - delete_dataset_parser = subparsers.add_parser( - "delete_dataset", help=delete_dataset.__doc__ - ) - delete_dataset_parser.add_argument("dataset_id") - - project_id = os.environ["PROJECT_ID"] - compute_region = os.environ["REGION_NAME"] - - args = parser.parse_args() - - if args.command == "create_dataset": - multilabel = True if args.multilabel == "True" else False - create_dataset( - project_id, compute_region, args.dataset_name, multilabel - ) - if args.command == "list_datasets": - list_datasets(project_id, compute_region, args.filter_) - if args.command == "get_dataset": - get_dataset(project_id, compute_region, args.dataset_id) - if args.command == "import_data": - import_data(project_id, compute_region, args.dataset_id, args.path) - if args.command == "export_data": - export_data( - project_id, compute_region, args.dataset_id, args.output_uri - ) - if args.command == "delete_dataset": - delete_dataset(project_id, compute_region, args.dataset_id) diff --git a/language/snippets/automl/automl_natural_language_model.py b/language/snippets/automl/automl_natural_language_model.py deleted file mode 100755 index 354721213da5..000000000000 --- a/language/snippets/automl/automl_natural_language_model.py +++ /dev/null @@ -1,392 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the 
"License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This application demonstrates how to perform basic operations on model -with the Google AutoML Natural Language API. - -For more information, see the tutorial page at -https://cloud.google.com/natural-language/automl/docs/ -""" - -import argparse -import os - - -def create_model(project_id, compute_region, dataset_id, model_name): - """Create a model.""" - # [START automl_language_create_model] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # dataset_id = 'DATASET_ID_HERE' - # model_name = 'MODEL_NAME_HERE' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, compute_region) - - # Set model name and model metadata for the dataset. - my_model = { - "display_name": model_name, - "dataset_id": dataset_id, - "text_classification_model_metadata": {}, - } - - # Create a model with the model metadata in the region. - response = client.create_model(project_location, my_model) - print("Training operation name: {}".format(response.operation.name)) - print("Training started...") - - # [END automl_language_create_model] - - -def get_operation_status(operation_full_id): - """Get operation status.""" - # [START automl_language_get_operation_status] - # TODO(developer): Uncomment and set the following variables - # operation_full_id = - # 'projects//locations//operations/' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the latest state of a long-running operation. - response = client.transport._operations_client.get_operation( - operation_full_id - ) - - print("Operation status: {}".format(response)) - - # [END automl_language_get_operation_status] - - -def list_models(project_id, compute_region, filter_): - """List all models.""" - # [START automl_language_list_models] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # filter_ = 'DATASET_ID_HERE' - - from google.cloud import automl_v1beta1 as automl - from google.cloud.automl_v1beta1 import enums - - client = automl.AutoMlClient() - - # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, compute_region) - - # List all the models available in the region by applying filter. - response = client.list_models(project_location, filter_) - - print("List of models:") - for model in response: - # Retrieve deployment state. - deployment_state = "" - if model.deployment_state == enums.Model.DeploymentState.DEPLOYED: - deployment_state = "deployed" - else: - deployment_state = "undeployed" - - # Display the model information. 
- print("Model name: {}".format(model.name)) - print("Model id: {}".format(model.name.split("/")[-1])) - print("Model display name: {}".format(model.display_name)) - print("Model create time:") - print("\tseconds: {}".format(model.create_time.seconds)) - print("\tnanos: {}".format(model.create_time.nanos)) - print("Model deployment state: {}".format(deployment_state)) - - # [END automl_language_list_models] - - -def get_model(project_id, compute_region, model_id): - """Get model details.""" - # [START automl_language_get_model] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # model_id = 'MODEL_ID_HERE' - - from google.cloud import automl_v1beta1 as automl - from google.cloud.automl_v1beta1 import enums - - client = automl.AutoMlClient() - - # Get the full path of the model. - model_full_id = client.model_path(project_id, compute_region, model_id) - - # Get complete detail of the model. - model = client.get_model(model_full_id) - - # Retrieve deployment state. - deployment_state = "" - if model.deployment_state == enums.Model.DeploymentState.DEPLOYED: - deployment_state = "deployed" - else: - deployment_state = "undeployed" - - # Display the model information. - print("Model name: {}".format(model.name)) - print("Model id: {}".format(model.name.split("/")[-1])) - print("Model display name: {}".format(model.display_name)) - print("Model create time:") - print("\tseconds: {}".format(model.create_time.seconds)) - print("\tnanos: {}".format(model.create_time.nanos)) - print("Model deployment state: {}".format(deployment_state)) - - # [END automl_language_get_model] - - -def list_model_evaluations(project_id, compute_region, model_id, filter_): - """List model evaluations.""" - # [START automl_language_list_model_evaluations] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # model_id = 'MODEL_ID_HERE' - # filter_ = 'filter expression here' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the model. - model_full_id = client.model_path(project_id, compute_region, model_id) - - # List all the model evaluations in the model by applying filter. - response = client.list_model_evaluations(model_full_id, filter_) - - print("List of model evaluations:") - for element in response: - print(element) - - # [END automl_language_list_model_evaluations] - - -def get_model_evaluation( - project_id, compute_region, model_id, model_evaluation_id -): - """Get model evaluation.""" - # [START automl_language_get_model_evaluation] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # model_id = 'MODEL_ID_HERE' - # model_evaluation_id = 'MODEL_EVALUATION_ID_HERE' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the model evaluation. - model_evaluation_full_id = client.model_evaluation_path( - project_id, compute_region, model_id, model_evaluation_id - ) - - # Get complete detail of the model evaluation. 
- response = client.get_model_evaluation(model_evaluation_full_id) - - print(response) - - # [END automl_language_get_model_evaluation] - - -def display_evaluation(project_id, compute_region, model_id, filter_): - """Display evaluation.""" - # [START automl_language_display_evaluation] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # model_id = 'MODEL_ID_HERE' - # filter_ = 'filter expression here' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the model. - model_full_id = client.model_path(project_id, compute_region, model_id) - - # List all the model evaluations in the model by applying filter. - response = client.list_model_evaluations(model_full_id, filter_) - - # Iterate through the results. - for element in response: - # There is evaluation for each class in a model and for overall model. - # Get only the evaluation of overall model. - if not element.annotation_spec_id: - model_evaluation_id = element.name.split("/")[-1] - - # Resource name for the model evaluation. - model_evaluation_full_id = client.model_evaluation_path( - project_id, compute_region, model_id, model_evaluation_id - ) - - # Get a model evaluation. - model_evaluation = client.get_model_evaluation(model_evaluation_full_id) - - class_metrics = model_evaluation.classification_evaluation_metrics - confidence_metrics_entries = class_metrics.confidence_metrics_entry - - # Showing model score based on threshold of 0.5 - for confidence_metrics_entry in confidence_metrics_entries: - if confidence_metrics_entry.confidence_threshold == 0.5: - print("Precision and recall are based on a score threshold of 0.5") - print( - "Model Precision: {}%".format( - round(confidence_metrics_entry.precision * 100, 2) - ) - ) - print( - "Model Recall: {}%".format( - round(confidence_metrics_entry.recall * 100, 2) - ) - ) - print( - "Model F1 score: {}%".format( - round(confidence_metrics_entry.f1_score * 100, 2) - ) - ) - print( - "Model Precision@1: {}%".format( - round(confidence_metrics_entry.precision_at1 * 100, 2) - ) - ) - print( - "Model Recall@1: {}%".format( - round(confidence_metrics_entry.recall_at1 * 100, 2) - ) - ) - print( - "Model F1 score@1: {}%".format( - round(confidence_metrics_entry.f1_score_at1 * 100, 2) - ) - ) - - # [END automl_language_display_evaluation] - - -def delete_model(project_id, compute_region, model_id): - """Delete a model.""" - # [START automl_language_delete_model] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # model_id = 'MODEL_ID_HERE' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the model. - model_full_id = client.model_path(project_id, compute_region, model_id) - - # Delete a model. - response = client.delete_model(model_full_id) - - # synchronous check of operation status. - print("Model deleted. 
{}".format(response.result())) - - # [END automl_language_delete_model] - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - subparsers = parser.add_subparsers(dest="command") - - create_model_parser = subparsers.add_parser( - "create_model", help=create_model.__doc__ - ) - create_model_parser.add_argument("dataset_id") - create_model_parser.add_argument("model_name") - - get_operation_status_parser = subparsers.add_parser( - "get_operation_status", help=get_operation_status.__doc__ - ) - get_operation_status_parser.add_argument("operation_full_id") - - list_models_parser = subparsers.add_parser( - "list_models", help=list_models.__doc__ - ) - list_models_parser.add_argument("filter_") - - get_model_parser = subparsers.add_parser( - "get_model", help=get_model_evaluation.__doc__ - ) - get_model_parser.add_argument("model_id") - - list_model_evaluations_parser = subparsers.add_parser( - "list_model_evaluations", help=list_model_evaluations.__doc__ - ) - list_model_evaluations_parser.add_argument("model_id") - list_model_evaluations_parser.add_argument( - "filter_", nargs="?", default="" - ) - - get_model_evaluation_parser = subparsers.add_parser( - "get_model_evaluation", help=get_model_evaluation.__doc__ - ) - get_model_evaluation_parser.add_argument("model_id") - get_model_evaluation_parser.add_argument("model_evaluation_id") - - display_evaluation_parser = subparsers.add_parser( - "display_evaluation", help=display_evaluation.__doc__ - ) - display_evaluation_parser.add_argument("model_id") - display_evaluation_parser.add_argument("filter_", nargs="?", default="") - - delete_model_parser = subparsers.add_parser( - "delete_model", help=delete_model.__doc__ - ) - delete_model_parser.add_argument("model_id") - - project_id = os.environ["PROJECT_ID"] - compute_region = os.environ["REGION_NAME"] - - args = parser.parse_args() - - if args.command == "create_model": - create_model( - project_id, compute_region, args.dataset_id, args.model_name - ) - if args.command == "get_operation_status": - get_operation_status(args.operation_full_id) - if args.command == "list_models": - list_models(project_id, compute_region, args.filter_) - if args.command == "get_model": - get_model(project_id, compute_region, args.model_id) - if args.command == "list_model_evaluations": - list_model_evaluations( - project_id, compute_region, args.model_id, args.filter_ - ) - if args.command == "get_model_evaluation": - get_model_evaluation( - project_id, compute_region, args.model_id, args.model_evaluation_id - ) - if args.command == "display_evaluation": - display_evaluation( - project_id, compute_region, args.model_id, args.filter_ - ) - if args.command == "delete_model": - delete_model(project_id, compute_region, args.model_id) diff --git a/language/snippets/automl/automl_natural_language_predict.py b/language/snippets/automl/automl_natural_language_predict.py deleted file mode 100755 index b328c7aeb63b..000000000000 --- a/language/snippets/automl/automl_natural_language_predict.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This application demonstrates how to perform basic operations on prediction -with the Google AutoML Natural Language API. - -For more information, see the tutorial page at -https://cloud.google.com/natural-language/automl/docs/ -""" - -import argparse -import os - - -def predict(project_id, compute_region, model_id, file_path): - """Classify the content.""" - # [START automl_language_predict] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # model_id = 'MODEL_ID_HERE' - # file_path = '/local/path/to/file' - - from google.cloud import automl_v1beta1 as automl - - automl_client = automl.AutoMlClient() - - # Create client for prediction service. - prediction_client = automl.PredictionServiceClient() - - # Get the full path of the model. - model_full_id = automl_client.model_path( - project_id, compute_region, model_id - ) - - # Read the file content for prediction. - with open(file_path, "rb") as content_file: - snippet = content_file.read() - - # Set the payload by giving the content and type of the file. - payload = {"text_snippet": {"content": snippet, "mime_type": "text/plain"}} - - # params is additional domain-specific parameters. - # currently there is no additional parameters supported. - params = {} - response = prediction_client.predict(model_full_id, payload, params) - print("Prediction results:") - for result in response.payload: - print("Predicted class name: {}".format(result.display_name)) - print("Predicted class score: {}".format(result.classification.score)) - - # [END automl_language_predict] - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - subparsers = parser.add_subparsers(dest="command") - - predict_parser = subparsers.add_parser("predict", help=predict.__doc__) - predict_parser.add_argument("model_id") - predict_parser.add_argument("file_path") - - project_id = os.environ["PROJECT_ID"] - compute_region = os.environ["REGION_NAME"] - - args = parser.parse_args() - - if args.command == "predict": - predict(project_id, compute_region, args.model_id, args.file_path) diff --git a/language/snippets/automl/dataset_test.py b/language/snippets/automl/dataset_test.py deleted file mode 100644 index 94e5e5d062c8..000000000000 --- a/language/snippets/automl/dataset_test.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-import datetime
-import os
-
-import pytest
-
-import automl_natural_language_dataset
-
-project_id = os.environ["GCLOUD_PROJECT"]
-compute_region = "us-central1"
-
-
-@pytest.mark.slow
-def test_dataset_create_import_delete(capsys):
-    # create dataset
-    dataset_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
-    automl_natural_language_dataset.create_dataset(
-        project_id, compute_region, dataset_name
-    )
-    out, _ = capsys.readouterr()
-    create_dataset_output = out.splitlines()
-    assert "Dataset id: " in create_dataset_output[1]
-    dataset_id = create_dataset_output[1].split()[2]
-
-    # delete dataset
-    automl_natural_language_dataset.delete_dataset(
-        project_id, compute_region, dataset_id
-    )
-    out, _ = capsys.readouterr()
-    assert "Dataset deleted." in out
-
-
-def test_import_data(capsys):
-    # As importing a dataset can take a long time and only four operations can
-    # be run on a dataset at once, try to import into a nonexistent dataset and
-    # confirm that the dataset was not found, but other elements of the request
-    # were valid.
-    try:
-        data = "gs://{}-lcm/happiness.csv".format(project_id)
-        automl_natural_language_dataset.import_data(
-            project_id, compute_region, "TEN0000000000000000000", data
-        )
-        out, _ = capsys.readouterr()
-        assert (
-            "Dataset doesn't exist or is inaccessible for use with AutoMl."
-            in out
-        )
-    except Exception as e:
-        assert (
-            "Dataset doesn't exist or is inaccessible for use with AutoMl."
-            in e.message
-        )
-
-
-def test_dataset_list_get(capsys):
-    # list datasets
-    automl_natural_language_dataset.list_datasets(
-        project_id, compute_region, ""
-    )
-    out, _ = capsys.readouterr()
-    list_dataset_output = out.splitlines()
-    assert "Dataset id: " in list_dataset_output[2]
-
-    # get dataset
-    dataset_id = list_dataset_output[2].split()[2]
-    automl_natural_language_dataset.get_dataset(
-        project_id, compute_region, dataset_id
-    )
-    out, _ = capsys.readouterr()
-    assert "Dataset name: " in out
diff --git a/language/snippets/automl/model_test.py b/language/snippets/automl/model_test.py
deleted file mode 100644
index 8f484d2a2ad1..000000000000
--- a/language/snippets/automl/model_test.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -import datetime -import os - -from google.cloud import automl_v1beta1 as automl -import pytest - -import automl_natural_language_model - -project_id = os.environ["GCLOUD_PROJECT"] -compute_region = "us-central1" - - -@pytest.mark.skip(reason="creates too many models") -def test_model_create_status_delete(capsys): - # create model - client = automl.AutoMlClient() - model_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") - project_location = client.location_path(project_id, compute_region) - my_model = { - "display_name": model_name, - "dataset_id": "2551826603472450019", - "text_classification_model_metadata": {}, - } - response = client.create_model(project_location, my_model) - operation_name = response.operation.name - assert operation_name - - # get operation status - automl_natural_language_model.get_operation_status(operation_name) - out, _ = capsys.readouterr() - assert "Operation status: " in out - - # cancel operation - response.cancel() - - -def test_model_list_get_evaluate(capsys): - # list models - automl_natural_language_model.list_models(project_id, compute_region, "") - out, _ = capsys.readouterr() - list_models_output = out.splitlines() - assert "Model id: " in list_models_output[2] - - # get model - model_id = list_models_output[2].split()[2] - automl_natural_language_model.get_model( - project_id, compute_region, model_id - ) - out, _ = capsys.readouterr() - assert "Model name: " in out - - # list model evaluations - automl_natural_language_model.list_model_evaluations( - project_id, compute_region, model_id, "" - ) - out, _ = capsys.readouterr() - list_evals_output = out.splitlines() - assert "name: " in list_evals_output[1] - - # get model evaluation - model_evaluation_id = list_evals_output[1].split("/")[-1][:-1] - automl_natural_language_model.get_model_evaluation( - project_id, compute_region, model_id, model_evaluation_id - ) - out, _ = capsys.readouterr() - assert "evaluation_metric" in out diff --git a/language/snippets/automl/predict_test.py b/language/snippets/automl/predict_test.py deleted file mode 100644 index f511302d58ba..000000000000 --- a/language/snippets/automl/predict_test.py +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -import automl_natural_language_predict - -project_id = os.environ["GCLOUD_PROJECT"] -compute_region = "us-central1" - - -def test_predict(capsys): - model_id = "TCN3472481026502981088" - automl_natural_language_predict.predict( - project_id, compute_region, model_id, "resources/test.txt" - ) - out, _ = capsys.readouterr() - assert "Cheese" in out From 9c3411a2f5f06b51ac9079573a0431c7111e960a Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 24 Apr 2020 06:52:24 +0200 Subject: [PATCH 149/323] Update dependency google-api-python-client to v1.8.2 [(#3452)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3452) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-api-python-client](https://togithub.com/google/google-api-python-client) | patch | `==1.8.0` -> `==1.8.2` | | [google-api-python-client](https://togithub.com/google/google-api-python-client) | minor | `==1.7.11` -> `==1.8.2` | --- ### Release Notes
google/google-api-python-client ### [`v1.8.2`](https://togithub.com/google/google-api-python-client/blob/master/CHANGELOG.md#​182-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev181v182-2020-04-21) [Compare Source](https://togithub.com/google/google-api-python-client/compare/v1.8.1...v1.8.2) ### [`v1.8.1`](https://togithub.com/google/google-api-python-client/blob/master/CHANGELOG.md#​181-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev180v181-2020-04-20) [Compare Source](https://togithub.com/google/google-api-python-client/compare/v1.8.0...v1.8.1)
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about these updates again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 46afe12bb258..06ba56f1b073 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.8.0 +google-api-python-client==1.8.2 google-auth==1.14.0 google-auth-httplib2==0.0.3 From 9e081658e0b40ecb87697693791830415e97497b Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 28 Apr 2020 06:20:12 +0200 Subject: [PATCH 150/323] chore(deps): update dependency google-auth to v1.14.1 [(#3464)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3464) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | patch | `==1.14.0` -> `==1.14.1` | | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | minor | `==1.11.2` -> `==1.14.1` | --- ### Release Notes
googleapis/google-auth-library-python ### [`v1.14.1`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#​1141-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1140v1141-2020-04-21) [Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.14.0...v1.14.1)
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about these updates again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 06ba56f1b073..c5ff186f6b42 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.8.2 -google-auth==1.14.0 +google-auth==1.14.1 google-auth-httplib2==0.0.3 From 98baf716f2de631f7714a4f2200ae7796d45bfeb Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 5 May 2020 00:58:18 +0200 Subject: [PATCH 151/323] chore(deps): update dependency numpy to v1.18.4 [(#3675)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3675) Co-authored-by: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Co-authored-by: Takashi Matsuo --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 80d612f8c0d7..000eb4bcf2b4 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.18.3 +numpy==1.18.4 From 283cde66ecbd5b5a2a2ba840283552d9af6f0369 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 11 May 2020 22:24:11 +0200 Subject: [PATCH 152/323] chore(deps): update dependency google-auth to v1.14.2 [(#3724)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3724) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | patch | `==1.14.1` -> `==1.14.2` | --- ### Release Notes
googleapis/google-auth-library-python ### [`v1.14.2`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#​1142-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1141v1142-2020-05-07) [Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.14.1...v1.14.2)
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index c5ff186f6b42..a22972ab36d5 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.8.2 -google-auth==1.14.1 +google-auth==1.14.2 google-auth-httplib2==0.0.3 From 6d03155d508954e2a3b44d5718c5aec693a34206 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 13 May 2020 08:16:04 +0200 Subject: [PATCH 153/323] chore(deps): update dependency google-auth to v1.14.3 [(#3728)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3728) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | patch | `==1.14.2` -> `==1.14.3` | --- ### Release Notes
googleapis/google-auth-library-python ### [`v1.14.3`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#​1143-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1142v1143-2020-05-11) [Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.14.2...v1.14.3)
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [x] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index a22972ab36d5..6dbe6ea04c2f 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.8.2 -google-auth==1.14.2 +google-auth==1.14.3 google-auth-httplib2==0.0.3 From 57909bda981e5845355e4c4e0e65188dd353874d Mon Sep 17 00:00:00 2001 From: "Leah E. Cole" <6719667+leahecole@users.noreply.github.com> Date: Mon, 18 May 2020 21:32:27 -0700 Subject: [PATCH 154/323] update google-auth to 1.15.0 final part [(#3819)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3819) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 6dbe6ea04c2f..52ccdec8694e 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.8.2 -google-auth==1.14.3 +google-auth==1.15.0 google-auth-httplib2==0.0.3 From ea986f1035468cc6d34caa2d55f1697ef6bffc8e Mon Sep 17 00:00:00 2001 From: "Leah E. 
Cole" <6719667+leahecole@users.noreply.github.com> Date: Mon, 18 May 2020 22:14:32 -0700 Subject: [PATCH 155/323] update google-api-python-client to 1.8.3 final part [(#3827)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3827) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 52ccdec8694e..421ee1f3d9c8 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.8.2 +google-api-python-client==1.8.3 google-auth==1.15.0 google-auth-httplib2==0.0.3 From aa65b6d5bb167d6f188a4fc9479cbfee0eb1dc55 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 27 May 2020 18:17:02 +0200 Subject: [PATCH 156/323] chore(deps): update dependency google-api-python-client to v1.8.4 [(#3881)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3881) Co-authored-by: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Co-authored-by: gcf-merge-on-green[bot] <60162190+gcf-merge-on-green[bot]@users.noreply.github.com> --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 421ee1f3d9c8..2c0db82b498a 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.8.3 +google-api-python-client==1.8.4 google-auth==1.15.0 google-auth-httplib2==0.0.3 From 6dd04c9d542243b8ed3852952c52a043b3f1bdff Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 29 May 2020 00:27:36 +0200 Subject: [PATCH 157/323] chore(deps): update dependency google-auth to v1.16.0 [(#3903)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3903) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 2c0db82b498a..0673bda357d5 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.8.4 -google-auth==1.15.0 +google-auth==1.16.0 google-auth-httplib2==0.0.3 From 05580f6a57590982cbb6507840ce1d24533dbb36 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 2 Jun 2020 21:36:49 +0200 Subject: [PATCH 158/323] chore(deps): update dependency google-api-python-client to v1.9.1 [(#3930)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3930) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 0673bda357d5..0f61949724cb 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.8.4 +google-api-python-client==1.9.1 google-auth==1.16.0 google-auth-httplib2==0.0.3 From 9a4f51464d882920e6c1ebd6f604df8d7b94ebfd Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 4 Jun 2020 04:08:31 +0200 Subject: [PATCH 159/323] Update dependency numpy to v1.18.5 [(#3954)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3954) --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 000eb4bcf2b4..787df584e2e7 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.18.4 +numpy==1.18.5 From 2a7a88dd812661ee1a8071843b081dec23d3f093 Mon Sep 17 00:00:00 2001 From: "Leah E. Cole" <6719667+leahecole@users.noreply.github.com> Date: Thu, 4 Jun 2020 17:28:57 -0700 Subject: [PATCH 160/323] final update for google-auth [(#3967)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3967) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 0f61949724cb..130c1828654a 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.1 -google-auth==1.16.0 +google-auth==1.16.1 google-auth-httplib2==0.0.3 From b180e9273c029e1337192b35e6e0084b15ddb7f9 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 11 Jun 2020 01:14:10 +0200 Subject: [PATCH 161/323] Update dependency google-api-python-client to v1.9.2 [(#4038)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4038) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 130c1828654a..db1b542c08f8 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.9.1 +google-api-python-client==1.9.2 google-auth==1.16.1 google-auth-httplib2==0.0.3 From 5b4d82ac0ef45f1786b5ebe628a8d07732d76736 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 11 Jun 2020 21:51:16 +0200 Subject: [PATCH 162/323] Update dependency google-auth to v1.17.0 [(#4058)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4058) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index db1b542c08f8..387355ad4017 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.2 -google-auth==1.16.1 +google-auth==1.17.0 google-auth-httplib2==0.0.3 From 7fde4908b2885c7db13d0b049a961a43f18f4493 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 12 Jun 2020 02:32:11 +0200 Subject: [PATCH 163/323] chore(deps): update dependency google-auth to v1.17.1 [(#4073)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4073) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 387355ad4017..e2b7f65e1188 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.2 -google-auth==1.17.0 +google-auth==1.17.1 google-auth-httplib2==0.0.3 From 560cfe668c7af9aa323186c4a5afd679cef02b71 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 12 Jun 2020 22:53:46 +0200 Subject: [PATCH 164/323] Update dependency google-auth to v1.17.2 
[(#4083)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4083) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index e2b7f65e1188..dd41dc46b89b 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.2 -google-auth==1.17.1 +google-auth==1.17.2 google-auth-httplib2==0.0.3 From e89843ca4dbfa609fef642a232a46c16650e617d Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 12 Jun 2020 23:16:14 +0200 Subject: [PATCH 165/323] Update dependency google-api-python-client to v1.9.3 [(#4057)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4057) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-api-python-client](https://togithub.com/googleapis/google-api-python-client) | patch | `==1.9.2` -> `==1.9.3` | --- ### Release Notes
googleapis/google-api-python-client ### [`v1.9.3`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#​193-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev192v193-2020-06-10) [Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v1.9.2...v1.9.3)
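The `google-api-python-client` pin bumped here is what `language/snippets/api/analyze.py` (reformatted later in this series, in PATCH 189) builds its client with. As a quick sketch of the call path covered by these version bumps:

```python
import googleapiclient.discovery

# Build a REST client for the Natural Language API from its published
# discovery document, then issue one of the documents().analyze* methods.
service = googleapiclient.discovery.build("language", "v1")
body = {
    "document": {"type": "PLAIN_TEXT", "content": "Python is fun."},
    "encoding_type": "UTF32",
}
response = service.documents().analyzeSentiment(body=body).execute()
print(response["documentSentiment"])
```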
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index dd41dc46b89b..360c7ed11b68 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.9.2 +google-api-python-client==1.9.3 google-auth==1.17.2 google-auth-httplib2==0.0.3 From 1293c8db3d57e00168d4ae24767948b7292fb2f8 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 19 Jun 2020 05:34:55 +0200 Subject: [PATCH 166/323] Update dependency google-auth to v1.18.0 [(#4125)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4125) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 360c7ed11b68..39fd57da1266 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.3 -google-auth==1.17.2 +google-auth==1.18.0 google-auth-httplib2==0.0.3 From 4e648570f2a29f6361419d7f8b7b72731ff3b845 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 20 Jun 2020 01:16:04 +0200 Subject: [PATCH 167/323] chore(deps): update dependency google-cloud-automl to v1 [(#4127)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4127) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-cloud-automl](https://togithub.com/googleapis/python-automl) | major | `==0.10.0` -> `==1.0.1` | --- ### Release Notes
googleapis/python-automl ### [`v1.0.1`](https://togithub.com/googleapis/python-automl/blob/master/CHANGELOG.md#​101-httpswwwgithubcomgoogleapispython-automlcomparev100v101-2020-06-18) [Compare Source](https://togithub.com/googleapis/python-automl/compare/v0.10.0...v1.0.1)
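Unlike the patch-level bumps above, this is a major version bump (`0.10.0` -> `1.0.1`), so sample code consuming it may need source changes rather than a pin update alone. A hedged sketch of the v1 client surface the new pin provides (`my-project` and `us-central1` are placeholder values, not taken from this repo):

```python
from google.cloud import automl

client = automl.AutoMlClient()
# location_path() builds "projects/my-project/locations/us-central1".
parent = client.location_path("my-project", "us-central1")

# List the AutoML datasets under that location.
for dataset in client.list_datasets(parent):
    print(dataset.name)
```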
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- language/snippets/automl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/automl/requirements.txt b/language/snippets/automl/requirements.txt index eb3be7610a1b..867dfc61e77d 100644 --- a/language/snippets/automl/requirements.txt +++ b/language/snippets/automl/requirements.txt @@ -1 +1 @@ -google-cloud-automl==0.10.0 +google-cloud-automl==1.0.1 From a8f7beeeed77d9c22a6c309ce54a2937e5c43d69 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 23 Jun 2020 06:02:31 +0200 Subject: [PATCH 168/323] chore(deps): update dependency numpy to v1.19.0 [(#4137)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4137) --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 787df584e2e7..c575c64d5bec 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.18.5 +numpy==1.19.0 From d23fae60940c38897e94e7edaa75195f951effa9 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 9 Jul 2020 02:00:20 +0200 Subject: [PATCH 169/323] Update dependency google-auth-httplib2 to v0.0.4 [(#4255)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4255) Co-authored-by: Takashi Matsuo --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 39fd57da1266..9ae1b98d17be 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.3 google-auth==1.18.0 -google-auth-httplib2==0.0.3 +google-auth-httplib2==0.0.4 From 69c407747bdf8d63b4693fe82867ab80cd010f90 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 13 Jul 2020 00:46:30 +0200 Subject: [PATCH 170/323] chore(deps): update dependency pytest to v5.4.3 [(#4279)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4279) * chore(deps): update dependency pytest to v5.4.3 * specify pytest for python 2 in appengine Co-authored-by: Leah Cole --- language/snippets/api/requirements-test.txt | 2 +- language/snippets/automl/requirements-test.txt | 2 +- language/snippets/classify_text/requirements-test.txt | 2 +- language/snippets/cloud-client/v1/requirements-test.txt | 2 +- language/snippets/generated-samples/v1/requirements-test.txt | 2 +- language/snippets/sentiment/requirements-test.txt | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/language/snippets/api/requirements-test.txt b/language/snippets/api/requirements-test.txt index 781d4326c947..79738af5f268 100644 --- 
a/language/snippets/api/requirements-test.txt +++ b/language/snippets/api/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==5.4.3 diff --git a/language/snippets/automl/requirements-test.txt b/language/snippets/automl/requirements-test.txt index 781d4326c947..79738af5f268 100644 --- a/language/snippets/automl/requirements-test.txt +++ b/language/snippets/automl/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==5.4.3 diff --git a/language/snippets/classify_text/requirements-test.txt b/language/snippets/classify_text/requirements-test.txt index 781d4326c947..79738af5f268 100644 --- a/language/snippets/classify_text/requirements-test.txt +++ b/language/snippets/classify_text/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==5.4.3 diff --git a/language/snippets/cloud-client/v1/requirements-test.txt b/language/snippets/cloud-client/v1/requirements-test.txt index 781d4326c947..79738af5f268 100644 --- a/language/snippets/cloud-client/v1/requirements-test.txt +++ b/language/snippets/cloud-client/v1/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==5.4.3 diff --git a/language/snippets/generated-samples/v1/requirements-test.txt b/language/snippets/generated-samples/v1/requirements-test.txt index 781d4326c947..79738af5f268 100644 --- a/language/snippets/generated-samples/v1/requirements-test.txt +++ b/language/snippets/generated-samples/v1/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==5.4.3 diff --git a/language/snippets/sentiment/requirements-test.txt b/language/snippets/sentiment/requirements-test.txt index 781d4326c947..79738af5f268 100644 --- a/language/snippets/sentiment/requirements-test.txt +++ b/language/snippets/sentiment/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==5.4.3 From ff3886c3b8f489a9adc79f0a429b3094b9da2cf1 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 13 Jul 2020 22:20:34 +0200 Subject: [PATCH 171/323] chore(deps): update dependency google-auth to v1.19.0 [(#4293)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4293) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 9ae1b98d17be..b622076400dc 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.3 -google-auth==1.18.0 +google-auth==1.19.0 google-auth-httplib2==0.0.4 From eb8667f16b7bbddf7058dfcaeaf6f0063d6fdd2a Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 16 Jul 2020 23:24:07 +0200 Subject: [PATCH 172/323] Update dependency google-api-python-client to v1.10.0 [(#4302)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4302) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-api-python-client](https://togithub.com/googleapis/google-api-python-client) | minor | `==1.9.3` -> `==1.10.0` | --- ### Release Notes
googleapis/google-api-python-client ### [`v1.10.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#​1100-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev193v1100-2020-07-15) [Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v1.9.3...v1.10.0) ##### Features - allow to use 'six.moves.collections_abc.Mapping' in 'client_options.from_dict()' ([#​943](https://www.github.com/googleapis/google-api-python-client/issues/943)) ([21af37b](https://www.github.com/googleapis/google-api-python-client/commit/21af37b11ea2d6a89b3df484e1b2fa1d12849510)) - Build universal wheels ([#​948](https://www.github.com/googleapis/google-api-python-client/issues/948)) ([3e28a1e](https://www.github.com/googleapis/google-api-python-client/commit/3e28a1e0d47f829182cd92f37475ab91fa5e4afc)) - discovery supports retries ([#​967](https://www.github.com/googleapis/google-api-python-client/issues/967)) ([f3348f9](https://www.github.com/googleapis/google-api-python-client/commit/f3348f98bf91a88a28bf61b12b95e391cc3be1ff)), closes [#​848](https://www.github.com/googleapis/google-api-python-client/issues/848) ##### Documentation - consolidating and updating the Contribution Guide ([#​964](https://www.github.com/googleapis/google-api-python-client/issues/964)) ([63f97f3](https://www.github.com/googleapis/google-api-python-client/commit/63f97f37daee37a725eb05df3097b20d5d4eaaf0)), closes [#​963](https://www.github.com/googleapis/google-api-python-client/issues/963) ##### [1.9.3](https://www.github.com/googleapis/google-api-python-client/compare/v1.9.2...v1.9.3) (2020-06-10) ##### Bug Fixes - update GOOGLE_API_USE_MTLS values ([#​940](https://www.github.com/googleapis/google-api-python-client/issues/940)) ([19908ed](https://www.github.com/googleapis/google-api-python-client/commit/19908edcd8a3df1db41e34100acc1f15c3c99397)) ##### [1.9.2](https://www.github.com/googleapis/google-api-python-client/compare/v1.9.1...v1.9.2) (2020-06-04) ##### Bug Fixes - bump api-core version ([#​936](https://www.github.com/googleapis/google-api-python-client/issues/936)) ([ee53b3b](https://www.github.com/googleapis/google-api-python-client/commit/ee53b3b32a050874ba4cfb491fb384f94682c824)) ##### [1.9.1](https://www.github.com/googleapis/google-api-python-client/compare/v1.9.0...v1.9.1) (2020-06-02) ##### Bug Fixes - fix python-api-core dependency issue ([#​931](https://www.github.com/googleapis/google-api-python-client/issues/931)) ([42028ed](https://www.github.com/googleapis/google-api-python-client/commit/42028ed2b2be47f85b70eb813185264f1f573d01))
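Of the features listed above, "discovery supports retries" is the one most relevant to these samples: per the linked release note, `build()` gained a `num_retries` argument for retrying the discovery-document fetch on transient failures. A minimal sketch based on that changelog entry (the parameter is an assumption from the release note, not exercised by code in this repo):

```python
import googleapiclient.discovery

# Retry fetching the discovery document up to 3 times on transient
# errors before giving up, new in google-api-python-client 1.10.0.
service = googleapiclient.discovery.build("language", "v1", num_retries=3)
```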
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index b622076400dc..1545b7277e55 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.9.3 +google-api-python-client==1.10.0 google-auth==1.19.0 google-auth-httplib2==0.0.4 From d90ac7266ef65821f831d2e2a9359f541ffdd8a1 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 17 Jul 2020 19:02:17 +0200 Subject: [PATCH 173/323] chore(deps): update dependency google-auth to v1.19.1 [(#4304)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4304) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 1545b7277e55..2b8bcacdcb8d 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.10.0 -google-auth==1.19.0 +google-auth==1.19.1 google-auth-httplib2==0.0.4 From 32e51e53a5eb0b0f5825ecc31c7b5106ac8b66c4 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 18 Jul 2020 02:48:10 +0200 Subject: [PATCH 174/323] chore(deps): update dependency google-auth to v1.19.2 [(#4321)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4321) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | patch | `==1.19.1` -> `==1.19.2` | --- ### Release Notes
googleapis/google-auth-library-python ### [`v1.19.2`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#​1192-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1191v1192-2020-07-17) [Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.19.1...v1.19.2)
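Since these auth bumps land every few days, a quick throwaway check (not part of the samples) to confirm which version an environment actually resolved after installing the pinned requirements:

```python
import google.auth

# After this PR, the pinned requirements should resolve to 1.19.2.
print(google.auth.__version__)
```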
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 2b8bcacdcb8d..5f63ebcbf55f 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.10.0 -google-auth==1.19.1 +google-auth==1.19.2 google-auth-httplib2==0.0.4 From aaf993749533a70f0af046618f23bfc6d97a7ec6 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 21 Jul 2020 23:48:04 +0200 Subject: [PATCH 175/323] chore(deps): update dependency numpy to v1.19.1 [(#4351)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4351) --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index c575c64d5bec..7ff166cc7e2c 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.19.0 +numpy==1.19.1 From ccb05ac64cc3bf34620d2b72e75e1e7bc61a92bb Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 28 Jul 2020 22:36:14 +0200 Subject: [PATCH 176/323] Update dependency google-auth to v1.20.0 [(#4387)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4387) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 5f63ebcbf55f..4f9d90056749 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.10.0 -google-auth==1.19.2 +google-auth==1.20.0 google-auth-httplib2==0.0.4 From 47c5cbf440d7d8c7bb8840ec97d5123057711941 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 1 Aug 2020 21:51:00 +0200 Subject: [PATCH 177/323] Update dependency pytest to v6 [(#4390)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4390) --- language/snippets/api/requirements-test.txt | 2 +- language/snippets/automl/requirements-test.txt | 2 +- language/snippets/classify_text/requirements-test.txt | 2 +- language/snippets/cloud-client/v1/requirements-test.txt | 2 +- language/snippets/generated-samples/v1/requirements-test.txt | 2 +- language/snippets/sentiment/requirements-test.txt | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/language/snippets/api/requirements-test.txt b/language/snippets/api/requirements-test.txt index 79738af5f268..7e460c8c866e 100644 --- a/language/snippets/api/requirements-test.txt +++ b/language/snippets/api/requirements-test.txt @@ -1 +1 @@ -pytest==5.4.3 +pytest==6.0.1 diff --git 
a/language/snippets/automl/requirements-test.txt b/language/snippets/automl/requirements-test.txt index 79738af5f268..7e460c8c866e 100644 --- a/language/snippets/automl/requirements-test.txt +++ b/language/snippets/automl/requirements-test.txt @@ -1 +1 @@ -pytest==5.4.3 +pytest==6.0.1 diff --git a/language/snippets/classify_text/requirements-test.txt b/language/snippets/classify_text/requirements-test.txt index 79738af5f268..7e460c8c866e 100644 --- a/language/snippets/classify_text/requirements-test.txt +++ b/language/snippets/classify_text/requirements-test.txt @@ -1 +1 @@ -pytest==5.4.3 +pytest==6.0.1 diff --git a/language/snippets/cloud-client/v1/requirements-test.txt b/language/snippets/cloud-client/v1/requirements-test.txt index 79738af5f268..7e460c8c866e 100644 --- a/language/snippets/cloud-client/v1/requirements-test.txt +++ b/language/snippets/cloud-client/v1/requirements-test.txt @@ -1 +1 @@ -pytest==5.4.3 +pytest==6.0.1 diff --git a/language/snippets/generated-samples/v1/requirements-test.txt b/language/snippets/generated-samples/v1/requirements-test.txt index 79738af5f268..7e460c8c866e 100644 --- a/language/snippets/generated-samples/v1/requirements-test.txt +++ b/language/snippets/generated-samples/v1/requirements-test.txt @@ -1 +1 @@ -pytest==5.4.3 +pytest==6.0.1 diff --git a/language/snippets/sentiment/requirements-test.txt b/language/snippets/sentiment/requirements-test.txt index 79738af5f268..7e460c8c866e 100644 --- a/language/snippets/sentiment/requirements-test.txt +++ b/language/snippets/sentiment/requirements-test.txt @@ -1 +1 @@ -pytest==5.4.3 +pytest==6.0.1 From 20e80f9a66652dcf3a002d0356c36045ae6da70f Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 7 Aug 2020 03:36:31 +0200 Subject: [PATCH 178/323] chore(deps): update dependency google-auth to v1.20.1 [(#4452)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4452) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 4f9d90056749..41f4cf40e0d5 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.10.0 -google-auth==1.20.0 +google-auth==1.20.1 google-auth-httplib2==0.0.4 From e15fbf77f6bdadd3ab6758ef872b4c8eddff1612 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 27 Aug 2020 06:12:53 +0200 Subject: [PATCH 179/323] chore(deps): update dependency google-api-python-client to v1.10.1 [(#4557)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4557) * chore(deps): update dependency google-api-python-client to v1.10.1 * Update requirements.txt Co-authored-by: Takashi Matsuo --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 41f4cf40e0d5..6e149081c31a 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.10.0 +google-api-python-client==1.10.1 google-auth==1.20.1 google-auth-httplib2==0.0.4 From 170b5a338556afbee966b4456efa279ae6a6731c Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 28 Aug 2020 01:17:31 +0200 Subject: [PATCH 180/323] Update dependency google-auth to v1.21.0 [(#4588)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4588) --- 
language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 6e149081c31a..e25b0a8b96b2 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.10.1 -google-auth==1.20.1 +google-auth==1.21.0 google-auth-httplib2==0.0.4 From 2b42ed8f7b77668eafb88af4d8b6458f7e44aee7 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 28 Aug 2020 07:21:42 +0200 Subject: [PATCH 181/323] Update dependency google-api-python-client to v1.11.0 [(#4587)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4587) Co-authored-by: Takashi Matsuo --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index e25b0a8b96b2..b5124a505c42 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.10.1 +google-api-python-client==1.11.0 google-auth==1.21.0 google-auth-httplib2==0.0.4 From 2604439084cc7ba92d8e6a5c60b811a65428c842 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 10 Sep 2020 00:57:06 +0200 Subject: [PATCH 182/323] chore(deps): update dependency google-auth to v1.21.1 [(#4634)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4634) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index b5124a505c42..7858cee7e3a7 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.11.0 -google-auth==1.21.0 +google-auth==1.21.1 google-auth-httplib2==0.0.4 From d2a24936d83b45010c758d4e35429d160e8b518b Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 15 Sep 2020 00:59:19 +0200 Subject: [PATCH 183/323] chore(deps): update dependency numpy to v1.19.2 [(#4662)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4662) --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 7ff166cc7e2c..de040ee00ca4 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.19.1 +numpy==1.19.2 From ceee0ad05074392ee5cf855ce4ed68701948fa1c Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 18 Sep 2020 18:30:21 +0200 Subject: [PATCH 184/323] chore(deps): update dependency google-auth to v1.21.2 [(#4684)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4684) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 7858cee7e3a7..85a462c8be7f 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.11.0 -google-auth==1.21.1 +google-auth==1.21.2 google-auth-httplib2==0.0.4 From 0ba09d247842825d1c2227efe74f963101ea3649 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim 
<8822365+busunkim96@users.noreply.github.com> Date: Fri, 18 Sep 2020 13:46:06 -0600 Subject: [PATCH 185/323] chore: delete automl samples [(#4696)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4696) --- language/snippets/automl/requirements-test.txt | 1 - language/snippets/automl/requirements.txt | 1 - language/snippets/automl/resources/test.txt | 1 - 3 files changed, 3 deletions(-) delete mode 100644 language/snippets/automl/requirements-test.txt delete mode 100644 language/snippets/automl/requirements.txt delete mode 100644 language/snippets/automl/resources/test.txt diff --git a/language/snippets/automl/requirements-test.txt b/language/snippets/automl/requirements-test.txt deleted file mode 100644 index 7e460c8c866e..000000000000 --- a/language/snippets/automl/requirements-test.txt +++ /dev/null @@ -1 +0,0 @@ -pytest==6.0.1 diff --git a/language/snippets/automl/requirements.txt b/language/snippets/automl/requirements.txt deleted file mode 100644 index 867dfc61e77d..000000000000 --- a/language/snippets/automl/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -google-cloud-automl==1.0.1 diff --git a/language/snippets/automl/resources/test.txt b/language/snippets/automl/resources/test.txt deleted file mode 100644 index f0dde24bd9b1..000000000000 --- a/language/snippets/automl/resources/test.txt +++ /dev/null @@ -1 +0,0 @@ -A strong taste of hazlenut and orange From 3f5507f4ef7210651bfa9b76753cd295b336aae6 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 22 Sep 2020 22:11:39 +0200 Subject: [PATCH 186/323] chore(deps): update dependency google-api-python-client to v1.12.1 [(#4674)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4674) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 85a462c8be7f..f739af513968 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.11.0 +google-api-python-client==1.12.1 google-auth==1.21.2 google-auth-httplib2==0.0.4 From 8685b778935e957cb94e38e4a73f169bb0f30aff Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 23 Sep 2020 22:31:01 +0200 Subject: [PATCH 187/323] chore(deps): update dependency google-auth to v1.21.3 [(#4754)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4754) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index f739af513968..572132c4f078 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.12.1 -google-auth==1.21.2 +google-auth==1.21.3 google-auth-httplib2==0.0.4 From 55b3069d43ffd393bc0fdd8b035aaa27653f3d2e Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 24 Sep 2020 22:42:49 +0200 Subject: [PATCH 188/323] chore(deps): update dependency google-api-python-client to v1.12.2 [(#4751)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4751) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 572132c4f078..72a261b14e37 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ 
-1,3 +1,3 @@ -google-api-python-client==1.12.1 +google-api-python-client==1.12.2 google-auth==1.21.3 google-auth-httplib2==0.0.4 From 498813c7808cf3ba53e423ff267f997f1b558359 Mon Sep 17 00:00:00 2001 From: Harikumar Devandla Date: Sun, 27 Sep 2020 11:36:58 -0700 Subject: [PATCH 189/323] chore: update templates --- language/AUTHORING_GUIDE.md | 1 + language/CONTRIBUTING.md | 1 + language/snippets/api/analyze.py | 54 ++-- language/snippets/api/analyze_test.py | 242 ++++++++++-------- language/snippets/api/noxfile.py | 222 ++++++++++++++++ .../classify_text/classify_text_tutorial.py | 118 ++++----- .../classify_text_tutorial_test.py | 35 ++- language/snippets/classify_text/noxfile.py | 222 ++++++++++++++++ language/snippets/cloud-client/v1/noxfile.py | 222 ++++++++++++++++ .../snippets/cloud-client/v1/quickstart.py | 13 +- .../cloud-client/v1/quickstart_test.py | 2 +- .../snippets/cloud-client/v1/set_endpoint.py | 10 +- .../cloud-client/v1/set_endpoint_test.py | 2 +- .../v1/language_sentiment_text.py | 10 +- .../v1/language_sentiment_text_test.py | 8 +- .../snippets/generated-samples/v1/noxfile.py | 222 ++++++++++++++++ language/snippets/sentiment/noxfile.py | 222 ++++++++++++++++ .../snippets/sentiment/sentiment_analysis.py | 32 ++- .../sentiment/sentiment_analysis_test.py | 22 +- 19 files changed, 1395 insertions(+), 265 deletions(-) create mode 100644 language/AUTHORING_GUIDE.md create mode 100644 language/CONTRIBUTING.md create mode 100644 language/snippets/api/noxfile.py create mode 100644 language/snippets/classify_text/noxfile.py create mode 100644 language/snippets/cloud-client/v1/noxfile.py create mode 100644 language/snippets/generated-samples/v1/noxfile.py create mode 100644 language/snippets/sentiment/noxfile.py diff --git a/language/AUTHORING_GUIDE.md b/language/AUTHORING_GUIDE.md new file mode 100644 index 000000000000..55c97b32f4c1 --- /dev/null +++ b/language/AUTHORING_GUIDE.md @@ -0,0 +1 @@ +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/AUTHORING_GUIDE.md \ No newline at end of file diff --git a/language/CONTRIBUTING.md b/language/CONTRIBUTING.md new file mode 100644 index 000000000000..34c882b6f1a3 --- /dev/null +++ b/language/CONTRIBUTING.md @@ -0,0 +1 @@ +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/CONTRIBUTING.md \ No newline at end of file diff --git a/language/snippets/api/analyze.py b/language/snippets/api/analyze.py index a1e702b12cb5..be8652269b96 100644 --- a/language/snippets/api/analyze.py +++ b/language/snippets/api/analyze.py @@ -26,21 +26,18 @@ def get_native_encoding_type(): """Returns the encoding type that matches Python's native strings.""" if sys.maxunicode == 65535: - return 'UTF16' + return "UTF16" else: - return 'UTF32' + return "UTF32" -def analyze_entities(text, encoding='UTF32'): +def analyze_entities(text, encoding="UTF32"): body = { - 'document': { - 'type': 'PLAIN_TEXT', - 'content': text, - }, - 'encoding_type': encoding, + "document": {"type": "PLAIN_TEXT", "content": text}, + "encoding_type": encoding, } - service = googleapiclient.discovery.build('language', 'v1') + service = googleapiclient.discovery.build("language", "v1") request = service.documents().analyzeEntities(body=body) response = request.execute() @@ -48,16 +45,13 @@ def analyze_entities(text, encoding='UTF32'): return response -def analyze_sentiment(text, encoding='UTF32'): +def analyze_sentiment(text, encoding="UTF32"): body = { - 'document': { - 'type': 'PLAIN_TEXT', - 'content': text, - }, - 
'encoding_type': encoding + "document": {"type": "PLAIN_TEXT", "content": text}, + "encoding_type": encoding, } - service = googleapiclient.discovery.build('language', 'v1') + service = googleapiclient.discovery.build("language", "v1") request = service.documents().analyzeSentiment(body=body) response = request.execute() @@ -65,16 +59,13 @@ def analyze_sentiment(text, encoding='UTF32'): return response -def analyze_syntax(text, encoding='UTF32'): +def analyze_syntax(text, encoding="UTF32"): body = { - 'document': { - 'type': 'PLAIN_TEXT', - 'content': text, - }, - 'encoding_type': encoding + "document": {"type": "PLAIN_TEXT", "content": text}, + "encoding_type": encoding, } - service = googleapiclient.discovery.build('language', 'v1') + service = googleapiclient.discovery.build("language", "v1") request = service.documents().analyzeSyntax(body=body) response = request.execute() @@ -82,21 +73,20 @@ def analyze_syntax(text, encoding='UTF32'): return response -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('command', choices=[ - 'entities', 'sentiment', 'syntax']) - parser.add_argument('text') + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("command", choices=["entities", "sentiment", "syntax"]) + parser.add_argument("text") args = parser.parse_args() - if args.command == 'entities': + if args.command == "entities": result = analyze_entities(args.text, get_native_encoding_type()) - elif args.command == 'sentiment': + elif args.command == "sentiment": result = analyze_sentiment(args.text, get_native_encoding_type()) - elif args.command == 'syntax': + elif args.command == "syntax": result = analyze_syntax(args.text, get_native_encoding_type()) print(json.dumps(result, indent=2)) diff --git a/language/snippets/api/analyze_test.py b/language/snippets/api/analyze_test.py index 08852c33ff73..b4a0db672733 100644 --- a/language/snippets/api/analyze_test.py +++ b/language/snippets/api/analyze_test.py @@ -18,46 +18,51 @@ def test_analyze_entities(): result = analyze.analyze_entities( - 'Tom Sawyer is a book written by a guy known as Mark Twain.') + "Tom Sawyer is a book written by a guy known as Mark Twain." + ) - assert result['language'] == 'en' - entities = result['entities'] + assert result["language"] == "en" + entities = result["entities"] assert len(entities) subject = entities[0] - assert subject['type'] == 'PERSON' - assert subject['name'].startswith('Tom') + assert subject["type"] == "PERSON" + assert subject["name"].startswith("Tom") def test_analyze_sentiment(capsys): - result = analyze.analyze_sentiment( - 'your face is really ugly and i hate it.') + result = analyze.analyze_sentiment("your face is really ugly and i hate it.") - sentiment = result['documentSentiment'] - assert sentiment['score'] < 0 - assert sentiment['magnitude'] < 1 + sentiment = result["documentSentiment"] + assert sentiment["score"] < 0 + assert sentiment["magnitude"] < 1 result = analyze.analyze_sentiment( - 'cheerio, mate - I greatly admire the pallor of your visage, and your ' - 'angle of repose leaves little room for improvement.') + "cheerio, mate - I greatly admire the pallor of your visage, and your " + "angle of repose leaves little room for improvement." 
+ ) - sentiment = result['documentSentiment'] - assert sentiment['score'] > 0 - assert sentiment['magnitude'] < 1 + sentiment = result["documentSentiment"] + assert sentiment["score"] > 0 + assert sentiment["magnitude"] < 1 def test_analyze_syntax(capsys): - result = analyze.analyze_syntax(textwrap.dedent(u'''\ + result = analyze.analyze_syntax( + textwrap.dedent( + u"""\ Keep away from people who try to belittle your ambitions. Small people always do that, but the really great make you feel that you, too, can become great. - - Mark Twain''')) + - Mark Twain""" + ) + ) - assert len(result['tokens']) - first_token = result['tokens'][0] - assert first_token['text']['content'] == 'Keep' - assert first_token['partOfSpeech']['tag'] == 'VERB' - assert len(result['sentences']) > 1 - assert result['language'] == 'en' + assert len(result["tokens"]) + first_token = result["tokens"][0] + assert first_token["text"]["content"] == "Keep" + assert first_token["partOfSpeech"]["tag"] == "VERB" + assert len(result["sentences"]) > 1 + assert result["language"] == "en" def test_analyze_syntax_utf8(): @@ -67,38 +72,43 @@ def test_analyze_syntax_utf8(): bits. The offsets we get should be the index of the first byte of the character. """ - test_string = u'a \u00e3 \u0201 \U0001f636 b' - byte_array = test_string.encode('utf8') - result = analyze.analyze_syntax(test_string, encoding='UTF8') - tokens = result['tokens'] - - assert tokens[0]['text']['content'] == 'a' - offset = tokens[0]['text'].get('beginOffset', 0) - assert (byte_array[offset:offset+1].decode('utf8') == - tokens[0]['text']['content']) - - assert tokens[1]['text']['content'] == u'\u00e3' - offset = tokens[1]['text'].get('beginOffset', 0) - assert (byte_array[offset:offset+2].decode('utf8') == - tokens[1]['text']['content']) - - assert tokens[2]['text']['content'] == u'\u0201' - offset = tokens[2]['text'].get('beginOffset', 0) - assert (byte_array[offset:offset+2].decode('utf8') == - tokens[2]['text']['content']) - - assert tokens[3]['text']['content'] == u'\U0001f636' - offset = tokens[3]['text'].get('beginOffset', 0) - assert (byte_array[offset:offset+4].decode('utf8') == - tokens[3]['text']['content']) + test_string = u"a \u00e3 \u0201 \U0001f636 b" + byte_array = test_string.encode("utf8") + result = analyze.analyze_syntax(test_string, encoding="UTF8") + tokens = result["tokens"] + + assert tokens[0]["text"]["content"] == "a" + offset = tokens[0]["text"].get("beginOffset", 0) + assert ( + byte_array[offset : offset + 1].decode("utf8") == tokens[0]["text"]["content"] + ) + + assert tokens[1]["text"]["content"] == u"\u00e3" + offset = tokens[1]["text"].get("beginOffset", 0) + assert ( + byte_array[offset : offset + 2].decode("utf8") == tokens[1]["text"]["content"] + ) + + assert tokens[2]["text"]["content"] == u"\u0201" + offset = tokens[2]["text"].get("beginOffset", 0) + assert ( + byte_array[offset : offset + 2].decode("utf8") == tokens[2]["text"]["content"] + ) + + assert tokens[3]["text"]["content"] == u"\U0001f636" + offset = tokens[3]["text"].get("beginOffset", 0) + assert ( + byte_array[offset : offset + 4].decode("utf8") == tokens[3]["text"]["content"] + ) # This demonstrates that the offset takes into account the variable-length # characters before the target token. 
- assert tokens[4]['text']['content'] == u'b' - offset = tokens[4]['text'].get('beginOffset', 0) + assert tokens[4]["text"]["content"] == u"b" + offset = tokens[4]["text"].get("beginOffset", 0) # 'b' is only one byte long - assert (byte_array[offset:offset+1].decode('utf8') == - tokens[4]['text']['content']) + assert ( + byte_array[offset : offset + 1].decode("utf8") == tokens[4]["text"]["content"] + ) def test_analyze_syntax_utf16(): @@ -108,53 +118,58 @@ def test_analyze_syntax_utf16(): bits. The returned offsets will be the index of the first 2-byte character of the token. """ - test_string = u'a \u00e3 \u0201 \U0001f636 b' - byte_array = test_string.encode('utf16') + test_string = u"a \u00e3 \u0201 \U0001f636 b" + byte_array = test_string.encode("utf16") # Remove the byte order marker, which the offsets don't account for byte_array = byte_array[2:] - result = analyze.analyze_syntax(test_string, encoding='UTF16') - tokens = result['tokens'] + result = analyze.analyze_syntax(test_string, encoding="UTF16") + tokens = result["tokens"] - assert tokens[0]['text']['content'] == 'a' + assert tokens[0]["text"]["content"] == "a" # The offset is an offset into an array where each entry is 16 bits. Since # we have an 8-bit array, the offsets should be doubled to index into our # array. - offset = 2 * tokens[0]['text'].get('beginOffset', 0) - assert (byte_array[offset:offset + 2].decode('utf16') == - tokens[0]['text']['content']) + offset = 2 * tokens[0]["text"].get("beginOffset", 0) + assert ( + byte_array[offset : offset + 2].decode("utf16") == tokens[0]["text"]["content"] + ) - assert tokens[1]['text']['content'] == u'\u00e3' - offset = 2 * tokens[1]['text'].get('beginOffset', 0) + assert tokens[1]["text"]["content"] == u"\u00e3" + offset = 2 * tokens[1]["text"].get("beginOffset", 0) # A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so # slice out 2 bytes starting from the offset. Then interpret the bytes as # utf16 for comparison. - assert (byte_array[offset:offset + 2].decode('utf16') == - tokens[1]['text']['content']) + assert ( + byte_array[offset : offset + 2].decode("utf16") == tokens[1]["text"]["content"] + ) - assert tokens[2]['text']['content'] == u'\u0201' - offset = 2 * tokens[2]['text'].get('beginOffset', 0) + assert tokens[2]["text"]["content"] == u"\u0201" + offset = 2 * tokens[2]["text"].get("beginOffset", 0) # A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so # slice out 2 bytes starting from the offset. Then interpret the bytes as # utf16 for comparison. - assert (byte_array[offset:offset + 2].decode('utf16') == - tokens[2]['text']['content']) + assert ( + byte_array[offset : offset + 2].decode("utf16") == tokens[2]["text"]["content"] + ) - assert tokens[3]['text']['content'] == u'\U0001f636' - offset = 2 * tokens[3]['text'].get('beginOffset', 0) + assert tokens[3]["text"]["content"] == u"\U0001f636" + offset = 2 * tokens[3]["text"].get("beginOffset", 0) # A UTF16 character with a high codepoint is 32 bits (4 bytes) long, so # slice out 4 bytes starting from the offset. Then interpret those bytes as # utf16 for comparison. - assert (byte_array[offset:offset + 4].decode('utf16') == - tokens[3]['text']['content']) + assert ( + byte_array[offset : offset + 4].decode("utf16") == tokens[3]["text"]["content"] + ) # This demonstrates that the offset takes into account the variable-length # characters before the target token. 
- assert tokens[4]['text']['content'] == u'b' - offset = 2 * tokens[4]['text'].get('beginOffset', 0) + assert tokens[4]["text"]["content"] == u"b" + offset = 2 * tokens[4]["text"].get("beginOffset", 0) # Even though 'b' is only one byte long, utf16 still encodes it using 16 # bits - assert (byte_array[offset:offset + 2].decode('utf16') == - tokens[4]['text']['content']) + assert ( + byte_array[offset : offset + 2].decode("utf16") == tokens[4]["text"]["content"] + ) def test_annotate_text_utf32(): @@ -178,53 +193,58 @@ def test_annotate_text_utf32(): unicode object with the raw offset returned by the api (ie without multiplying it by 4, as it is below). """ - test_string = u'a \u00e3 \u0201 \U0001f636 b' - byte_array = test_string.encode('utf32') + test_string = u"a \u00e3 \u0201 \U0001f636 b" + byte_array = test_string.encode("utf32") # Remove the byte order marker, which the offsets don't account for byte_array = byte_array[4:] - result = analyze.analyze_syntax(test_string, encoding='UTF32') - tokens = result['tokens'] + result = analyze.analyze_syntax(test_string, encoding="UTF32") + tokens = result["tokens"] - assert tokens[0]['text']['content'] == 'a' + assert tokens[0]["text"]["content"] == "a" # The offset is an offset into an array where each entry is 32 bits. Since # we have an 8-bit array, the offsets should be quadrupled to index into # our array. - offset = 4 * tokens[0]['text'].get('beginOffset', 0) - assert (byte_array[offset:offset + 4].decode('utf32') == - tokens[0]['text']['content']) + offset = 4 * tokens[0]["text"].get("beginOffset", 0) + assert ( + byte_array[offset : offset + 4].decode("utf32") == tokens[0]["text"]["content"] + ) - assert tokens[1]['text']['content'] == u'\u00e3' - offset = 4 * tokens[1]['text'].get('beginOffset', 0) + assert tokens[1]["text"]["content"] == u"\u00e3" + offset = 4 * tokens[1]["text"].get("beginOffset", 0) # A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so # slice out 4 bytes starting from the offset. Then interpret the bytes as # utf32 for comparison. - assert (byte_array[offset:offset + 4].decode('utf32') == - tokens[1]['text']['content']) + assert ( + byte_array[offset : offset + 4].decode("utf32") == tokens[1]["text"]["content"] + ) - assert tokens[2]['text']['content'] == u'\u0201' - offset = 4 * tokens[2]['text'].get('beginOffset', 0) + assert tokens[2]["text"]["content"] == u"\u0201" + offset = 4 * tokens[2]["text"].get("beginOffset", 0) # A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so # slice out 4 bytes starting from the offset. Then interpret the bytes as # utf32 for comparison. - assert (byte_array[offset:offset + 4].decode('utf32') == - tokens[2]['text']['content']) + assert ( + byte_array[offset : offset + 4].decode("utf32") == tokens[2]["text"]["content"] + ) - assert tokens[3]['text']['content'] == u'\U0001f636' - offset = 4 * tokens[3]['text'].get('beginOffset', 0) + assert tokens[3]["text"]["content"] == u"\U0001f636" + offset = 4 * tokens[3]["text"].get("beginOffset", 0) # A UTF32 character with a high codepoint is 32 bits (4 bytes) long, so # slice out 4 bytes starting from the offset. Then interpret those bytes as # utf32 for comparison. - assert (byte_array[offset:offset + 4].decode('utf32') == - tokens[3]['text']['content']) + assert ( + byte_array[offset : offset + 4].decode("utf32") == tokens[3]["text"]["content"] + ) # This demonstrates that the offset takes into account the variable-length # characters before the target token. 
- assert tokens[4]['text']['content'] == u'b' - offset = 4 * tokens[4]['text'].get('beginOffset', 0) + assert tokens[4]["text"]["content"] == u"b" + offset = 4 * tokens[4]["text"].get("beginOffset", 0) # Even though 'b' is only one byte long, utf32 still encodes it using 32 # bits - assert (byte_array[offset:offset + 4].decode('utf32') == - tokens[4]['text']['content']) + assert ( + byte_array[offset : offset + 4].decode("utf32") == tokens[4]["text"]["content"] + ) def test_annotate_text_utf32_directly_index_into_unicode(): @@ -233,21 +253,21 @@ def test_annotate_text_utf32_directly_index_into_unicode(): See the explanation for test_annotate_text_utf32. Essentially, indexing into a utf32 array is equivalent to indexing into a python unicode object. """ - test_string = u'a \u00e3 \u0201 \U0001f636 b' - result = analyze.analyze_syntax(test_string, encoding='UTF32') - tokens = result['tokens'] + test_string = u"a \u00e3 \u0201 \U0001f636 b" + result = analyze.analyze_syntax(test_string, encoding="UTF32") + tokens = result["tokens"] - assert tokens[0]['text']['content'] == 'a' - offset = tokens[0]['text'].get('beginOffset', 0) - assert test_string[offset] == tokens[0]['text']['content'] + assert tokens[0]["text"]["content"] == "a" + offset = tokens[0]["text"].get("beginOffset", 0) + assert test_string[offset] == tokens[0]["text"]["content"] - assert tokens[1]['text']['content'] == u'\u00e3' - offset = tokens[1]['text'].get('beginOffset', 0) - assert test_string[offset] == tokens[1]['text']['content'] + assert tokens[1]["text"]["content"] == u"\u00e3" + offset = tokens[1]["text"].get("beginOffset", 0) + assert test_string[offset] == tokens[1]["text"]["content"] - assert tokens[2]['text']['content'] == u'\u0201' - offset = tokens[2]['text'].get('beginOffset', 0) - assert test_string[offset] == tokens[2]['text']['content'] + assert tokens[2]["text"]["content"] == u"\u0201" + offset = tokens[2]["text"].get("beginOffset", 0) + assert test_string[offset] == tokens[2]["text"]["content"] # Temporarily disabled # assert tokens[3]['text']['content'] == u'\U0001f636' diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py new file mode 100644 index 000000000000..5660f08be441 --- /dev/null +++ b/language/snippets/api/noxfile.py @@ -0,0 +1,222 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. 
+ +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7"], + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append(".") + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG["gcloud_project_env"] + # This should error out if not set. + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG["envs"]) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
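The local-import helper above can be exercised against a throwaway directory. A sketch, assuming _determine_local_import_names is importable from this noxfile; note that ("__pycache__") is a plain string, not a tuple, so the membership test is a substring check, which still excludes __pycache__ itself:

    import os
    import tempfile

    d = tempfile.mkdtemp()
    open(os.path.join(d, "analyze.py"), "w").close()  # a local module
    os.mkdir(os.path.join(d, "resources"))            # a local directory
    os.mkdir(os.path.join(d, "__pycache__"))          # filtered out

    assert sorted(_determine_local_import_names(d)) == ["analyze", "resources"]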
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + ".", + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
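One subtlety in the repository-root walk that follows: Path.parent of the filesystem root is the root itself, so the `p is None` guard can never fire; it is the range(10) bound that actually stops the loop when no .git directory exists. A two-line check:

    from pathlib import Path

    # Ascending past the root is a no-op; the parent is never None.
    assert Path("/").parent == Path("/")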
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/language/snippets/classify_text/classify_text_tutorial.py b/language/snippets/classify_text/classify_text_tutorial.py index d193e62e367b..fcd5008ba804 100644 --- a/language/snippets/classify_text/classify_text_tutorial.py +++ b/language/snippets/classify_text/classify_text_tutorial.py @@ -29,6 +29,7 @@ from google.cloud import language import numpy import six + # [END language_classify_text_tutorial_imports] @@ -39,8 +40,8 @@ def classify(text, verbose=True): language_client = language.LanguageServiceClient() document = language.types.Document( - content=text, - type=language.enums.Document.Type.PLAIN_TEXT) + content=text, type=language.enums.Document.Type.PLAIN_TEXT + ) response = language_client.classify_text(document) categories = response.categories @@ -55,11 +56,13 @@ def classify(text, verbose=True): if verbose: print(text) for category in categories: - print(u'=' * 20) - print(u'{:<16}: {}'.format('category', category.name)) - print(u'{:<16}: {}'.format('confidence', category.confidence)) + print(u"=" * 20) + print(u"{:<16}: {}".format("category", category.name)) + print(u"{:<16}: {}".format("confidence", category.confidence)) return result + + # [END language_classify_text_tutorial_classify] @@ -77,19 +80,21 @@ def index(path, index_file): continue try: - with io.open(file_path, 'r') as f: + with io.open(file_path, "r") as f: text = f.read() categories = classify(text, verbose=False) result[filename] = categories except Exception: - print('Failed to process {}'.format(file_path)) + print("Failed to process {}".format(file_path)) - with io.open(index_file, 'w', encoding='utf-8') as f: + with io.open(index_file, "w", encoding="utf-8") as f: f.write(json.dumps(result, ensure_ascii=False)) - print('Texts indexed in file: {}'.format(index_file)) + print("Texts indexed in file: {}".format(index_file)) return result + + # [END language_classify_text_tutorial_index] @@ -114,7 +119,7 @@ def split_labels(categories): """ _categories = {} for name, confidence in six.iteritems(categories): - labels = [label for label in name.split('/') if label] + labels = [label for label in name.split("/") if label] for label in labels: _categories[label] = confidence @@ -147,7 +152,7 @@ def query(index_file, text, n_top=3): the query text. """ - with io.open(index_file, 'r') as f: + with io.open(index_file, "r") as f: index = json.load(f) # Get the categories of the query text. 
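The `similarity` helper called in the next hunk is not itself part of this patch. A plausible sketch that is consistent with the tutorial's tests later in this series (identical inputs score above 0.99, an empty query scores 0.0, partial overlap lands strictly between 0 and 1) is cosine similarity over the split-label weights, reusing split_labels from above:

    import numpy

    def similarity_sketch(categories1, categories2):
        # Hypothetical stand-in for classify_text_tutorial.similarity.
        c1, c2 = split_labels(categories1), split_labels(categories2)
        labels = sorted(set(c1) | set(c2))
        v1 = numpy.array([c1.get(label, 0.0) for label in labels])
        v2 = numpy.array([c2.get(label, 0.0) for label in labels])
        norm = numpy.linalg.norm(v1) * numpy.linalg.norm(v2)
        return float(numpy.dot(v1, v2) / norm) if norm else 0.0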
@@ -155,22 +160,23 @@ def query(index_file, text, n_top=3): similarities = [] for filename, categories in six.iteritems(index): - similarities.append( - (filename, similarity(query_categories, categories))) + similarities.append((filename, similarity(query_categories, categories))) similarities = sorted(similarities, key=lambda p: p[1], reverse=True) - print('=' * 20) - print('Query: {}\n'.format(text)) + print("=" * 20) + print("Query: {}\n".format(text)) for category, confidence in six.iteritems(query_categories): - print('\tCategory: {}, confidence: {}'.format(category, confidence)) - print('\nMost similar {} indexed texts:'.format(n_top)) + print("\tCategory: {}, confidence: {}".format(category, confidence)) + print("\nMost similar {} indexed texts:".format(n_top)) for filename, sim in similarities[:n_top]: - print('\tFilename: {}'.format(filename)) - print('\tSimilarity: {}'.format(sim)) - print('\n') + print("\tFilename: {}".format(filename)) + print("\tSimilarity: {}".format(sim)) + print("\n") return similarities + + # [END language_classify_text_tutorial_query] @@ -183,7 +189,7 @@ def query_category(index_file, category_string, n_top=3): https://cloud.google.com/natural-language/docs/categories """ - with io.open(index_file, 'r') as f: + with io.open(index_file, "r") as f: index = json.load(f) # Make the category_string into a dictionary so that it is @@ -192,61 +198,59 @@ def query_category(index_file, category_string, n_top=3): similarities = [] for filename, categories in six.iteritems(index): - similarities.append( - (filename, similarity(query_categories, categories))) + similarities.append((filename, similarity(query_categories, categories))) similarities = sorted(similarities, key=lambda p: p[1], reverse=True) - print('=' * 20) - print('Query: {}\n'.format(category_string)) - print('\nMost similar {} indexed texts:'.format(n_top)) + print("=" * 20) + print("Query: {}\n".format(category_string)) + print("\nMost similar {} indexed texts:".format(n_top)) for filename, sim in similarities[:n_top]: - print('\tFilename: {}'.format(filename)) - print('\tSimilarity: {}'.format(sim)) - print('\n') + print("\tFilename: {}".format(filename)) + print("\tSimilarity: {}".format(sim)) + print("\n") return similarities + + # [END language_classify_text_tutorial_query_category] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - subparsers = parser.add_subparsers(dest='command') - classify_parser = subparsers.add_parser( - 'classify', help=classify.__doc__) + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + subparsers = parser.add_subparsers(dest="command") + classify_parser = subparsers.add_parser("classify", help=classify.__doc__) classify_parser.add_argument( - 'text', help='The text to be classified. ' - 'The text needs to have at least 20 tokens.') - index_parser = subparsers.add_parser( - 'index', help=index.__doc__) + "text", + help="The text to be classified. " "The text needs to have at least 20 tokens.", + ) + index_parser = subparsers.add_parser("index", help=index.__doc__) index_parser.add_argument( - 'path', help='The directory that contains ' - 'text files to be indexed.') + "path", help="The directory that contains " "text files to be indexed." 
+ ) index_parser.add_argument( - '--index_file', help='Filename for the output JSON.', - default='index.json') - query_parser = subparsers.add_parser( - 'query', help=query.__doc__) - query_parser.add_argument( - 'index_file', help='Path to the index JSON file.') - query_parser.add_argument( - 'text', help='Query text.') + "--index_file", help="Filename for the output JSON.", default="index.json" + ) + query_parser = subparsers.add_parser("query", help=query.__doc__) + query_parser.add_argument("index_file", help="Path to the index JSON file.") + query_parser.add_argument("text", help="Query text.") query_category_parser = subparsers.add_parser( - 'query-category', help=query_category.__doc__) - query_category_parser.add_argument( - 'index_file', help='Path to the index JSON file.') + "query-category", help=query_category.__doc__ + ) query_category_parser.add_argument( - 'category', help='Query category.') + "index_file", help="Path to the index JSON file." + ) + query_category_parser.add_argument("category", help="Query category.") args = parser.parse_args() - if args.command == 'classify': + if args.command == "classify": classify(args.text) - if args.command == 'index': + if args.command == "index": index(args.path, args.index_file) - if args.command == 'query': + if args.command == "query": query(args.index_file, args.text) - if args.command == 'query-category': + if args.command == "query-category": query_category(args.index_file, args.category) diff --git a/language/snippets/classify_text/classify_text_tutorial_test.py b/language/snippets/classify_text/classify_text_tutorial_test.py index 28de0562bdba..5e8211299bf7 100644 --- a/language/snippets/classify_text/classify_text_tutorial_test.py +++ b/language/snippets/classify_text/classify_text_tutorial_test.py @@ -18,37 +18,37 @@ import classify_text_tutorial -OUTPUT = 'index.json' -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +OUTPUT = "index.json" +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") QUERY_TEXT = """Google Home enables users to speak voice commands to interact with services through the Home\'s intelligent personal assistant called Google Assistant. 
A large number of services, both in-house and third-party, are integrated, allowing users to listen to music, look at videos or photos, or receive news updates entirely by voice.""" -QUERY_CATEGORY = '/Computers & Electronics/Software' +QUERY_CATEGORY = "/Computers & Electronics/Software" -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def index_file(tmpdir_factory): - temp_file = tmpdir_factory.mktemp('tmp').join(OUTPUT) + temp_file = tmpdir_factory.mktemp("tmp").join(OUTPUT) temp_out = temp_file.strpath - classify_text_tutorial.index(os.path.join(RESOURCES, 'texts'), temp_out) + classify_text_tutorial.index(os.path.join(RESOURCES, "texts"), temp_out) return temp_file def test_classify(capsys): - with open(os.path.join(RESOURCES, 'query_text1.txt'), 'r') as f: + with open(os.path.join(RESOURCES, "query_text1.txt"), "r") as f: text = f.read() classify_text_tutorial.classify(text) out, err = capsys.readouterr() - assert 'category' in out + assert "category" in out def test_index(capsys, tmpdir): - temp_dir = tmpdir.mkdir('tmp') + temp_dir = tmpdir.mkdir("tmp") temp_out = temp_dir.join(OUTPUT).strpath - classify_text_tutorial.index(os.path.join(RESOURCES, 'texts'), temp_out) + classify_text_tutorial.index(os.path.join(RESOURCES, "texts"), temp_out) out, err = capsys.readouterr() assert OUTPUT in out @@ -61,7 +61,7 @@ def test_query_text(capsys, index_file): classify_text_tutorial.query(temp_out, QUERY_TEXT) out, err = capsys.readouterr() - assert 'Filename: cloud_computing.txt' in out + assert "Filename: cloud_computing.txt" in out def test_query_category(capsys, index_file): @@ -70,22 +70,21 @@ def test_query_category(capsys, index_file): classify_text_tutorial.query_category(temp_out, QUERY_CATEGORY) out, err = capsys.readouterr() - assert 'Filename: cloud_computing.txt' in out + assert "Filename: cloud_computing.txt" in out def test_split_labels(): - categories = {'/a/b/c': 1.0} - split_categories = {'a': 1.0, 'b': 1.0, 'c': 1.0} + categories = {"/a/b/c": 1.0} + split_categories = {"a": 1.0, "b": 1.0, "c": 1.0} assert classify_text_tutorial.split_labels(categories) == split_categories def test_similarity(): empty_categories = {} - categories1 = {'/a/b/c': 1.0, '/d/e': 1.0} - categories2 = {'/a/b': 1.0} + categories1 = {"/a/b/c": 1.0, "/d/e": 1.0} + categories2 = {"/a/b": 1.0} - assert classify_text_tutorial.similarity( - empty_categories, categories1) == 0.0 + assert classify_text_tutorial.similarity(empty_categories, categories1) == 0.0 assert classify_text_tutorial.similarity(categories1, categories1) > 0.99 assert classify_text_tutorial.similarity(categories1, categories2) > 0 assert classify_text_tutorial.similarity(categories1, categories2) < 1 diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py new file mode 100644 index 000000000000..5660f08be441 --- /dev/null +++ b/language/snippets/classify_text/noxfile.py @@ -0,0 +1,222 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7"], + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append(".") + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG["gcloud_project_env"] + # This should error out if not set. + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG["envs"]) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
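Note that get_pytest_env_vars above fails loudly when the configured project variable is missing: the bare os.environ[env_key] lookup raises KeyError instead of silently running tests against whatever project gcloud happens to default to. A sketch of that failure mode, assuming the function above is in scope:

    import os

    os.environ.pop("GOOGLE_CLOUD_PROJECT", None)
    try:
        get_pytest_env_vars()
    except KeyError as missing:
        print("Set this variable before running the tests:", missing)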
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + ".", + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py new file mode 100644 index 000000000000..5660f08be441 --- /dev/null +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -0,0 +1,222 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7"], + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append(".") + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG["gcloud_project_env"] + # This should error out if not set. + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG["envs"]) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
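A caveat about the INSTALL_LIBRARY_FROM_SOURCE flag defined above: it is read from an environment string, and any non-empty string is truthy in Python, so exporting the variable as "false" still enables source installs; only an unset or empty variable disables them:

    import os

    os.environ["INSTALL_LIBRARY_FROM_SOURCE"] = "false"
    assert bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) is True

    os.environ["INSTALL_LIBRARY_FROM_SOURCE"] = ""
    assert bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) is False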
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + ".", + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
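A small stylistic note on the loop that follows: p is already a Path, so wrapping the join in another Path(...) call is a harmless no-op inherited from the template; (p / ".git").exists() would behave identically:

    from pathlib import Path

    p = Path("/tmp")
    assert Path(p / ".git") == p / ".git"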
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/language/snippets/cloud-client/v1/quickstart.py b/language/snippets/cloud-client/v1/quickstart.py index 7c075a513b64..2cf46437283e 100644 --- a/language/snippets/cloud-client/v1/quickstart.py +++ b/language/snippets/cloud-client/v1/quickstart.py @@ -22,6 +22,7 @@ def run_quickstart(): from google.cloud import language from google.cloud.language import enums from google.cloud.language import types + # [END language_python_migration_imports] # Instantiates a client @@ -30,18 +31,16 @@ def run_quickstart(): # [END language_python_migration_client] # The text to analyze - text = u'Hello, world!' - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) + text = u"Hello, world!" + document = types.Document(content=text, type=enums.Document.Type.PLAIN_TEXT) # Detects the sentiment of the text sentiment = client.analyze_sentiment(document=document).document_sentiment - print('Text: {}'.format(text)) - print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) + print("Text: {}".format(text)) + print("Sentiment: {}, {}".format(sentiment.score, sentiment.magnitude)) # [END language_quickstart] -if __name__ == '__main__': +if __name__ == "__main__": run_quickstart() diff --git a/language/snippets/cloud-client/v1/quickstart_test.py b/language/snippets/cloud-client/v1/quickstart_test.py index bd9954c83bb7..59b44da841d4 100644 --- a/language/snippets/cloud-client/v1/quickstart_test.py +++ b/language/snippets/cloud-client/v1/quickstart_test.py @@ -19,4 +19,4 @@ def test_quickstart(capsys): quickstart.run_quickstart() out, _ = capsys.readouterr() - assert 'Sentiment' in out + assert "Sentiment" in out diff --git a/language/snippets/cloud-client/v1/set_endpoint.py b/language/snippets/cloud-client/v1/set_endpoint.py index abc6f180a523..340d518071c8 100644 --- a/language/snippets/cloud-client/v1/set_endpoint.py +++ b/language/snippets/cloud-client/v1/set_endpoint.py @@ -19,7 +19,7 @@ def set_endpoint(): # Imports the Google Cloud client library from google.cloud import language - client_options = {'api_endpoint': 'eu-language.googleapis.com:443'} + client_options = {"api_endpoint": "eu-language.googleapis.com:443"} # Instantiates a client client = language.LanguageServiceClient(client_options=client_options) @@ -27,14 +27,14 @@ def set_endpoint(): # The text to analyze document = language.types.Document( - content='Hello, world!', - type=language.enums.Document.Type.PLAIN_TEXT) + content="Hello, world!", type=language.enums.Document.Type.PLAIN_TEXT + ) # Detects the sentiment of the text sentiment = client.analyze_sentiment(document=document).document_sentiment - print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) + print("Sentiment: {}, {}".format(sentiment.score, sentiment.magnitude)) -if __name__ == 
'__main__': +if __name__ == "__main__": set_endpoint() diff --git a/language/snippets/cloud-client/v1/set_endpoint_test.py b/language/snippets/cloud-client/v1/set_endpoint_test.py index 7e124c36a93d..817748b12be4 100644 --- a/language/snippets/cloud-client/v1/set_endpoint_test.py +++ b/language/snippets/cloud-client/v1/set_endpoint_test.py @@ -19,4 +19,4 @@ def test_set_endpoint(capsys): set_endpoint.set_endpoint() out, _ = capsys.readouterr() - assert 'Sentiment' in out + assert "Sentiment" in out diff --git a/language/snippets/generated-samples/v1/language_sentiment_text.py b/language/snippets/generated-samples/v1/language_sentiment_text.py index 10d17970df08..c28a366583d9 100644 --- a/language/snippets/generated-samples/v1/language_sentiment_text.py +++ b/language/snippets/generated-samples/v1/language_sentiment_text.py @@ -35,15 +35,15 @@ def sample_analyze_sentiment(content): # content = 'Your text to analyze, e.g. Hello, world!' if isinstance(content, six.binary_type): - content = content.decode('utf-8') + content = content.decode("utf-8") type_ = enums.Document.Type.PLAIN_TEXT - document = {'type': type_, 'content': content} + document = {"type": type_, "content": content} response = client.analyze_sentiment(document) sentiment = response.document_sentiment - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) + print("Score: {}".format(sentiment.score)) + print("Magnitude: {}".format(sentiment.magnitude)) # [END language_sentiment_text] @@ -54,5 +54,5 @@ def main(): sample_analyze_sentiment(*sys.argv[1:]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/language/snippets/generated-samples/v1/language_sentiment_text_test.py b/language/snippets/generated-samples/v1/language_sentiment_text_test.py index e1876da27525..fd89f626516f 100644 --- a/language/snippets/generated-samples/v1/language_sentiment_text_test.py +++ b/language/snippets/generated-samples/v1/language_sentiment_text_test.py @@ -17,12 +17,12 @@ def test_analyze_sentiment_text_positive(capsys): - language_sentiment_text.sample_analyze_sentiment('Happy Happy Joy Joy') + language_sentiment_text.sample_analyze_sentiment("Happy Happy Joy Joy") out, _ = capsys.readouterr() - assert 'Score: 0.' in out + assert "Score: 0." in out def test_analyze_sentiment_text_negative(capsys): - language_sentiment_text.sample_analyze_sentiment('Angry Angry Sad Sad') + language_sentiment_text.sample_analyze_sentiment("Angry Angry Sad Sad") out, _ = capsys.readouterr() - assert 'Score: -0.' in out + assert "Score: -0." in out diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py new file mode 100644 index 000000000000..5660f08be441 --- /dev/null +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -0,0 +1,222 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7"], + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append(".") + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG["gcloud_project_env"] + # This should error out if not set. + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG["envs"]) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
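With the defaults above, the version filtering reduces to a set difference over two small lists; a quick check of what actually gets tested when only 2.7 is ignored:

    ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
    IGNORED_VERSIONS = ["2.7"]

    tested = sorted(v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS)
    assert tested == ["3.6", "3.7", "3.8"]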
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + ".", + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py new file mode 100644 index 000000000000..5660f08be441 --- /dev/null +++ b/language/snippets/sentiment/noxfile.py @@ -0,0 +1,222 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7"], + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append(".") + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG["gcloud_project_env"] + # This should error out if not set. + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG["envs"]) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
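Worth knowing about the TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) merge above: dict.update is shallow, so an override replaces a key's entire value rather than deep-merging nested dictionaries; an override that sets envs discards the default envs mapping wholesale:

    config = {"ignored_versions": ["2.7"], "envs": {"DEFAULT": "1"}}
    config.update({"envs": {"EXTRA": "2"}})

    assert config == {"ignored_versions": ["2.7"], "envs": {"EXTRA": "2"}}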
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + ".", + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
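Further down, the readmegen session is parametrized over every *.rst.in template found beneath the sample directory; the discovery step is a recursive glob that typically yields at most one entry per sample:

    from pathlib import Path

    # e.g. [PosixPath('README.rst.in')] in a sample that has a template
    templates = sorted(Path(".").rglob("*.rst.in"))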
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/language/snippets/sentiment/sentiment_analysis.py b/language/snippets/sentiment/sentiment_analysis.py index 3b572bc2c94d..aef7a6586c2c 100644 --- a/language/snippets/sentiment/sentiment_analysis.py +++ b/language/snippets/sentiment/sentiment_analysis.py @@ -20,6 +20,7 @@ from google.cloud import language from google.cloud.language import enums from google.cloud.language import types + # [END language_sentiment_tutorial_imports] @@ -30,12 +31,16 @@ def print_result(annotations): for index, sentence in enumerate(annotations.sentences): sentence_sentiment = sentence.sentiment.score - print('Sentence {} has a sentiment score of {}'.format( - index, sentence_sentiment)) + print( + "Sentence {} has a sentiment score of {}".format(index, sentence_sentiment) + ) - print('Overall Sentiment: score of {} with magnitude of {}'.format( - score, magnitude)) + print( + "Overall Sentiment: score of {} with magnitude of {}".format(score, magnitude) + ) return 0 + + # [END language_sentiment_tutorial_print_result] @@ -44,28 +49,29 @@ def analyze(movie_review_filename): """Run a sentiment analysis request on text within a passed filename.""" client = language.LanguageServiceClient() - with open(movie_review_filename, 'r') as review_file: + with open(movie_review_filename, "r") as review_file: # Instantiates a plain text document. 
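The tests for this sample, below, combine the two document-level signals: score gives the sentiment's direction on a -1.0 to 1.0 scale, while magnitude gives its overall strength from 0 upward, so their product is positive only for a confidently positive document. A toy illustration with made-up values, not API output:

    score, magnitude = 0.8, 1.9      # a clearly positive review
    assert score * magnitude > 0

    score, magnitude = -0.6, 2.4     # a clearly negative review
    assert score * magnitude < 0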
content = review_file.read() - document = types.Document( - content=content, - type=enums.Document.Type.PLAIN_TEXT) + document = types.Document(content=content, type=enums.Document.Type.PLAIN_TEXT) annotations = client.analyze_sentiment(document=document) # Print the results print_result(annotations) + + # [END language_sentiment_tutorial_analyze_sentiment] # [START language_sentiment_tutorial_run_application] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) parser.add_argument( - 'movie_review_filename', - help='The filename of the movie review you\'d like to analyze.') + "movie_review_filename", + help="The filename of the movie review you'd like to analyze.", + ) args = parser.parse_args() analyze(args.movie_review_filename) diff --git a/language/snippets/sentiment/sentiment_analysis_test.py b/language/snippets/sentiment/sentiment_analysis_test.py index 05d28ab27898..845e842f7517 100644 --- a/language/snippets/sentiment/sentiment_analysis_test.py +++ b/language/snippets/sentiment/sentiment_analysis_test.py @@ -16,35 +16,35 @@ from sentiment_analysis import analyze -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") def test_pos(capsys): - analyze(os.path.join(RESOURCES, 'pos.txt')) + analyze(os.path.join(RESOURCES, "pos.txt")) out, err = capsys.readouterr() - score = float(re.search('score of (.+?) with', out).group(1)) - magnitude = float(re.search('magnitude of (.+?)', out).group(1)) + score = float(re.search("score of (.+?) with", out).group(1)) + magnitude = float(re.search("magnitude of (.+?)", out).group(1)) assert score * magnitude > 0 def test_neg(capsys): - analyze(os.path.join(RESOURCES, 'neg.txt')) + analyze(os.path.join(RESOURCES, "neg.txt")) out, err = capsys.readouterr() - score = float(re.search('score of (.+?) with', out).group(1)) - magnitude = float(re.search('magnitude of (.+?)', out).group(1)) + score = float(re.search("score of (.+?) with", out).group(1)) + magnitude = float(re.search("magnitude of (.+?)", out).group(1)) assert score * magnitude < 0 def test_mixed(capsys): - analyze(os.path.join(RESOURCES, 'mixed.txt')) + analyze(os.path.join(RESOURCES, "mixed.txt")) out, err = capsys.readouterr() - score = float(re.search('score of (.+?) with', out).group(1)) + score = float(re.search("score of (.+?) 
with", out).group(1)) assert score <= 0.3 assert score >= -0.3 def test_neutral(capsys): - analyze(os.path.join(RESOURCES, 'neutral.txt')) + analyze(os.path.join(RESOURCES, "neutral.txt")) out, err = capsys.readouterr() - magnitude = float(re.search('magnitude of (.+?)', out).group(1)) + magnitude = float(re.search("magnitude of (.+?)", out).group(1)) assert magnitude <= 2.0 From 25c46dd68f6d31e226ee5d21bf79eeb49ee121cc Mon Sep 17 00:00:00 2001 From: hkdevandla <60490673+hkdevandla@users.noreply.github.com> Date: Fri, 16 Oct 2020 14:17:33 -0700 Subject: [PATCH 190/323] feat: Migrate API to use python micro-generator (#41) * migrate API to use micro-generator * migrate API to use micro-generator * update * doc changes * add samples * add samples * add samples and readme * Update README.md * Update README.md * Update UPGRADING.md file * update synth.py Co-authored-by: arithmetic1728 --- language/snippets/api/README.rst | 4 +++ language/snippets/api/analyze_test.py | 3 +-- language/snippets/api/noxfile.py | 26 ++++++++++--------- language/snippets/classify_text/README.rst | 4 +++ .../classify_text/classify_text_tutorial.py | 10 +++---- language/snippets/classify_text/noxfile.py | 26 ++++++++++--------- language/snippets/cloud-client/v1/noxfile.py | 26 ++++++++++--------- .../snippets/cloud-client/v1/quickstart.py | 10 +++---- .../snippets/cloud-client/v1/set_endpoint.py | 10 +++---- .../v1/language_sentiment_text.py | 7 +++-- .../snippets/generated-samples/v1/noxfile.py | 26 ++++++++++--------- language/snippets/sentiment/noxfile.py | 26 ++++++++++--------- .../snippets/sentiment/sentiment_analysis.py | 10 +++---- language/v1/language_classify_gcs.py | 6 ++--- language/v1/language_classify_text.py | 6 ++--- language/v1/language_entities_gcs.py | 12 ++++----- language/v1/language_entities_text.py | 12 ++++----- language/v1/language_entity_sentiment_gcs.py | 12 ++++----- language/v1/language_entity_sentiment_text.py | 12 ++++----- language/v1/language_sentiment_gcs.py | 8 +++--- language/v1/language_sentiment_text.py | 8 +++--- language/v1/language_syntax_gcs.py | 16 +++++------- language/v1/language_syntax_text.py | 16 +++++------- 23 files changed, 144 insertions(+), 152 deletions(-) diff --git a/language/snippets/api/README.rst b/language/snippets/api/README.rst index 5f4edfd27738..0d9d945111a5 100644 --- a/language/snippets/api/README.rst +++ b/language/snippets/api/README.rst @@ -14,6 +14,10 @@ This directory contains samples for Google Cloud Natural Language API. The `Goog .. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ + + + + Setup ------------------------------------------------------------------------------- diff --git a/language/snippets/api/analyze_test.py b/language/snippets/api/analyze_test.py index b4a0db672733..c797e2e3364c 100644 --- a/language/snippets/api/analyze_test.py +++ b/language/snippets/api/analyze_test.py @@ -37,8 +37,7 @@ def test_analyze_sentiment(capsys): assert sentiment["magnitude"] < 1 result = analyze.analyze_sentiment( - "cheerio, mate - I greatly admire the pallor of your visage, and your " - "angle of repose leaves little room for improvement." + "cheerio, mate - I greatly admire the pallor of your visage, and your angle of repose leaves little room for improvement." 
) sentiment = result["documentSentiment"] diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index 5660f08be441..ba55d7ce53ca 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -37,22 +37,24 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -67,12 +69,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -81,7 +83,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +138,7 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." ] session.run("flake8", *args) @@ -180,9 +182,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/language/snippets/classify_text/README.rst b/language/snippets/classify_text/README.rst index a1112f21d016..757debb0946f 100644 --- a/language/snippets/classify_text/README.rst +++ b/language/snippets/classify_text/README.rst @@ -18,6 +18,10 @@ This tutorial demostrates how to use the `classify_text` method to classify cont .. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ + + + + Setup ------------------------------------------------------------------------------- diff --git a/language/snippets/classify_text/classify_text_tutorial.py b/language/snippets/classify_text/classify_text_tutorial.py index fcd5008ba804..9c05b83f589c 100644 --- a/language/snippets/classify_text/classify_text_tutorial.py +++ b/language/snippets/classify_text/classify_text_tutorial.py @@ -26,7 +26,7 @@ import json import os -from google.cloud import language +from google.cloud import language_v1 import numpy import six @@ -37,12 +37,12 @@ def classify(text, verbose=True): """Classify the input text into categories. 
""" - language_client = language.LanguageServiceClient() + language_client = language_v1.LanguageServiceClient() - document = language.types.Document( - content=text, type=language.enums.Document.Type.PLAIN_TEXT + document = language_v1.Document( + content=text, type_=language_v1.Document.Type.PLAIN_TEXT ) - response = language_client.classify_text(document) + response = language_client.classify_text(request={'document': document}) categories = response.categories result = {} diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index 5660f08be441..ba55d7ce53ca 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -37,22 +37,24 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -67,12 +69,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -81,7 +83,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +138,7 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." ] session.run("flake8", *args) @@ -180,9 +182,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index 5660f08be441..ba55d7ce53ca 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -37,22 +37,24 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. 
- "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -67,12 +69,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -81,7 +83,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +138,7 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." ] session.run("flake8", *args) @@ -180,9 +182,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/language/snippets/cloud-client/v1/quickstart.py b/language/snippets/cloud-client/v1/quickstart.py index 2cf46437283e..4c4b06b52a14 100644 --- a/language/snippets/cloud-client/v1/quickstart.py +++ b/language/snippets/cloud-client/v1/quickstart.py @@ -19,23 +19,21 @@ def run_quickstart(): # [START language_quickstart] # Imports the Google Cloud client library # [START language_python_migration_imports] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types + from google.cloud import language_v1 # [END language_python_migration_imports] # Instantiates a client # [START language_python_migration_client] - client = language.LanguageServiceClient() + client = language_v1.LanguageServiceClient() # [END language_python_migration_client] # The text to analyze text = u"Hello, world!" 
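    # NOTE: the rewritten line below renames the `type` keyword to `type_`.
    # The 2.x surface is generated with proto-plus, which appends a trailing
    # underscore to field names that would shadow a Python builtin or keyword,
    # and the RPCs in these samples are called with a single `request` mapping,
    # e.g. `client.analyze_sentiment(request={'document': document})`.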
- document = types.Document(content=text, type=enums.Document.Type.PLAIN_TEXT) + document = language_v1.Document(content=text, type_=language_v1.Document.Type.PLAIN_TEXT) # Detects the sentiment of the text - sentiment = client.analyze_sentiment(document=document).document_sentiment + sentiment = client.analyze_sentiment(request={'document': document}).document_sentiment print("Text: {}".format(text)) print("Sentiment: {}, {}".format(sentiment.score, sentiment.magnitude)) diff --git a/language/snippets/cloud-client/v1/set_endpoint.py b/language/snippets/cloud-client/v1/set_endpoint.py index 340d518071c8..e9ad97d3e4b1 100644 --- a/language/snippets/cloud-client/v1/set_endpoint.py +++ b/language/snippets/cloud-client/v1/set_endpoint.py @@ -17,21 +17,21 @@ def set_endpoint(): """Change your endpoint""" # [START language_set_endpoint] # Imports the Google Cloud client library - from google.cloud import language + from google.cloud import language_v1 client_options = {"api_endpoint": "eu-language.googleapis.com:443"} # Instantiates a client - client = language.LanguageServiceClient(client_options=client_options) + client = language_v1.LanguageServiceClient(client_options=client_options) # [END language_set_endpoint] # The text to analyze - document = language.types.Document( - content="Hello, world!", type=language.enums.Document.Type.PLAIN_TEXT + document = language_v1.Document( + content="Hello, world!", type_=language_v1.Document.Type.PLAIN_TEXT ) # Detects the sentiment of the text - sentiment = client.analyze_sentiment(document=document).document_sentiment + sentiment = client.analyze_sentiment(request={'document': document}).document_sentiment print("Sentiment: {}, {}".format(sentiment.score, sentiment.magnitude)) diff --git a/language/snippets/generated-samples/v1/language_sentiment_text.py b/language/snippets/generated-samples/v1/language_sentiment_text.py index c28a366583d9..9f975023114f 100644 --- a/language/snippets/generated-samples/v1/language_sentiment_text.py +++ b/language/snippets/generated-samples/v1/language_sentiment_text.py @@ -24,7 +24,6 @@ # [START language_sentiment_text] from google.cloud import language_v1 -from google.cloud.language_v1 import enums import six @@ -37,10 +36,10 @@ def sample_analyze_sentiment(content): if isinstance(content, six.binary_type): content = content.decode("utf-8") - type_ = enums.Document.Type.PLAIN_TEXT - document = {"type": type_, "content": content} + type_ = language_v1.Document.Type.PLAIN_TEXT + document = {"type_": type_, "content": content} - response = client.analyze_sentiment(document) + response = client.analyze_sentiment(request={'document': document}) sentiment = response.document_sentiment print("Score: {}".format(sentiment.score)) print("Magnitude: {}".format(sentiment.magnitude)) diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index 5660f08be441..ba55d7ce53ca 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -37,22 +37,24 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. 
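    # NOTE: each of these noxfiles optionally imports TEST_CONFIG_OVERRIDE from
    # a noxfile_config.py placed next to the sample, letting a sample directory
    # override the defaults above. A minimal sketch, assuming only the keys
    # documented in this dict ("SAMPLE_FLAG" is a hypothetical env var):
    #
    #     TEST_CONFIG_OVERRIDE = {
    #         "ignored_versions": ["2.7", "3.6"],
    #         "gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
    #         "envs": {"SAMPLE_FLAG": "on"},
    #     }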
- "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -67,12 +69,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -81,7 +83,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +138,7 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." ] session.run("flake8", *args) @@ -180,9 +182,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index 5660f08be441..ba55d7ce53ca 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -37,22 +37,24 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -67,12 +69,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -81,7 +83,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. 
-IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +138,7 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." ] session.run("flake8", *args) @@ -180,9 +182,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/language/snippets/sentiment/sentiment_analysis.py b/language/snippets/sentiment/sentiment_analysis.py index aef7a6586c2c..2333bf8238ab 100644 --- a/language/snippets/sentiment/sentiment_analysis.py +++ b/language/snippets/sentiment/sentiment_analysis.py @@ -17,9 +17,7 @@ # [START language_sentiment_tutorial_imports] import argparse -from google.cloud import language -from google.cloud.language import enums -from google.cloud.language import types +from google.cloud import language_v1 # [END language_sentiment_tutorial_imports] @@ -47,14 +45,14 @@ def print_result(annotations): # [START language_sentiment_tutorial_analyze_sentiment] def analyze(movie_review_filename): """Run a sentiment analysis request on text within a passed filename.""" - client = language.LanguageServiceClient() + client = language_v1.LanguageServiceClient() with open(movie_review_filename, "r") as review_file: # Instantiates a plain text document. content = review_file.read() - document = types.Document(content=content, type=enums.Document.Type.PLAIN_TEXT) - annotations = client.analyze_sentiment(document=document) + document = language_v1.Document(content=content, type_=language_v1.Document.Type.PLAIN_TEXT) + annotations = client.analyze_sentiment(request={'document': document}) # Print the results print_result(annotations) diff --git a/language/v1/language_classify_gcs.py b/language/v1/language_classify_gcs.py index 941640b10772..a20789cc8af7 100644 --- a/language/v1/language_classify_gcs.py +++ b/language/v1/language_classify_gcs.py @@ -26,8 +26,6 @@ # [START language_classify_gcs] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_classify_text(gcs_content_uri): """ @@ -44,7 +42,7 @@ def sample_classify_text(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/classify-entertainment.txt' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -52,7 +50,7 @@ def sample_classify_text(gcs_content_uri): language = "en" document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} - response = client.classify_text(document) + response = client.classify_text(request = {'document': document}) # Loop through classified categories returned from the API for category in response.categories: # Get the name of the category representing the document. 
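The sample rewrites above and below all apply the same two mechanical changes: enum classes move from the deleted `enums` module onto `language_v1` itself, and each call passes a single `request` mapping instead of positional arguments. A minimal end-to-end sketch of the resulting 2.x call pattern (assuming `google-cloud-language>=2.0.0` and default credentials; the GCS URI is the sample bucket used throughout these diffs, and `type_` is the spelling that [PATCH 192/323] below settles on):

    from google.cloud import language_v1

    client = language_v1.LanguageServiceClient()
    document = {
        # proto-plus renames the reserved field name `type` to `type_`
        "type_": language_v1.Document.Type.PLAIN_TEXT,
        "gcs_content_uri": "gs://cloud-samples-data/language/classify-entertainment.txt",
        "language": "en",
    }
    # 2.x microgenerator style: one request mapping instead of positional args
    response = client.classify_text(request={"document": document})
    for category in response.categories:
        print(u"Category name: {}".format(category.name))
        print(u"Confidence: {}".format(category.confidence))
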
diff --git a/language/v1/language_classify_text.py b/language/v1/language_classify_text.py index 52175f02db7a..ad55d26cd110 100644 --- a/language/v1/language_classify_text.py +++ b/language/v1/language_classify_text.py @@ -26,8 +26,6 @@ # [START language_classify_text] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_classify_text(text_content): """ @@ -42,7 +40,7 @@ def sample_classify_text(text_content): # text_content = 'That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows.' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -50,7 +48,7 @@ def sample_classify_text(text_content): language = "en" document = {"content": text_content, "type": type_, "language": language} - response = client.classify_text(document) + response = client.classify_text(request = {'document': document}) # Loop through classified categories returned from the API for category in response.categories: # Get the name of the category representing the document. diff --git a/language/v1/language_entities_gcs.py b/language/v1/language_entities_gcs.py index 790592ca158e..d735e885dc7c 100644 --- a/language/v1/language_entities_gcs.py +++ b/language/v1/language_entities_gcs.py @@ -26,8 +26,6 @@ # [START language_entities_gcs] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_entities(gcs_content_uri): """ @@ -43,7 +41,7 @@ def sample_analyze_entities(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/entity.txt' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -52,14 +50,14 @@ def sample_analyze_entities(gcs_content_uri): document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1..EncodingType.UTF8 - response = client.analyze_entities(document, encoding_type=encoding_type) + response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type}) # Loop through entitites returned from the API for entity in response.entities: print(u"Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name)) # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) # Loop over the metadata associated with entity. For many known entities, @@ -75,7 +73,7 @@ def sample_analyze_entities(gcs_content_uri): print(u"Mention text: {}".format(mention.text.content)) # Get the mention type, e.g. 
PROPER for proper noun print( - u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) + u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name) ) # Get the language of the text, which will be the same as diff --git a/language/v1/language_entities_text.py b/language/v1/language_entities_text.py index 464a313d3029..db2ad9e2d7c2 100644 --- a/language/v1/language_entities_text.py +++ b/language/v1/language_entities_text.py @@ -26,8 +26,6 @@ # [START language_entities_text] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_entities(text_content): """ @@ -42,7 +40,7 @@ def sample_analyze_entities(text_content): # text_content = 'California is a state.' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -51,16 +49,16 @@ def sample_analyze_entities(text_content): document = {"content": text_content, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_entities(document, encoding_type=encoding_type) + response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type}) # Loop through entitites returned from the API for entity in response.entities: print(u"Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name)) # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) @@ -79,7 +77,7 @@ def sample_analyze_entities(text_content): # Get the mention type, e.g. PROPER for proper noun print( - u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) + u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name) ) # Get the language of the text, which will be the same as diff --git a/language/v1/language_entity_sentiment_gcs.py b/language/v1/language_entity_sentiment_gcs.py index 9fafa737e5a4..2a4c6ff368c1 100644 --- a/language/v1/language_entity_sentiment_gcs.py +++ b/language/v1/language_entity_sentiment_gcs.py @@ -26,8 +26,6 @@ # [START language_entity_sentiment_gcs] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_entity_sentiment(gcs_content_uri): """ @@ -43,7 +41,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/entity-sentiment.txt' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. 
# For list of supported languages: @@ -52,14 +50,14 @@ def sample_analyze_entity_sentiment(gcs_content_uri): document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_entity_sentiment(document, encoding_type=encoding_type) + response = client.analyze_entity_sentiment(request = {'document': document, 'encoding_type': encoding_type}) # Loop through entitites returned from the API for entity in response.entities: print(u"Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name)) # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) # Get the aggregate sentiment expressed for this entity in the provided document. @@ -79,7 +77,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri): print(u"Mention text: {}".format(mention.text.content)) # Get the mention type, e.g. PROPER for proper noun print( - u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) + u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name) ) # Get the language of the text, which will be the same as diff --git a/language/v1/language_entity_sentiment_text.py b/language/v1/language_entity_sentiment_text.py index 9b3d5b8a897f..20c9dbd83472 100644 --- a/language/v1/language_entity_sentiment_text.py +++ b/language/v1/language_entity_sentiment_text.py @@ -26,8 +26,6 @@ # [START language_entity_sentiment_text] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_entity_sentiment(text_content): """ @@ -42,7 +40,7 @@ def sample_analyze_entity_sentiment(text_content): # text_content = 'Grapes are good. Bananas are bad.' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -51,14 +49,14 @@ def sample_analyze_entity_sentiment(text_content): document = {"content": text_content, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_entity_sentiment(document, encoding_type=encoding_type) + response = client.analyze_entity_sentiment(request = {'document': document, 'encoding_type': encoding_type}) # Loop through entitites returned from the API for entity in response.entities: print(u"Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name)) # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) # Get the aggregate sentiment expressed for this entity in the provided document. @@ -78,7 +76,7 @@ def sample_analyze_entity_sentiment(text_content): print(u"Mention text: {}".format(mention.text.content)) # Get the mention type, e.g. 
PROPER for proper noun print( - u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) + u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name) ) # Get the language of the text, which will be the same as diff --git a/language/v1/language_sentiment_gcs.py b/language/v1/language_sentiment_gcs.py index 261f2f3e6233..68839805cc1f 100644 --- a/language/v1/language_sentiment_gcs.py +++ b/language/v1/language_sentiment_gcs.py @@ -26,8 +26,6 @@ # [START language_sentiment_gcs] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_sentiment(gcs_content_uri): """ @@ -43,7 +41,7 @@ def sample_analyze_sentiment(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/sentiment-positive.txt' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -52,9 +50,9 @@ def sample_analyze_sentiment(gcs_content_uri): document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_sentiment(document, encoding_type=encoding_type) + response = client.analyze_sentiment(request = {'document': document, 'encoding_type': encoding_type}) # Get overall sentiment of the input document print(u"Document sentiment score: {}".format(response.document_sentiment.score)) print( diff --git a/language/v1/language_sentiment_text.py b/language/v1/language_sentiment_text.py index 12f1e22113c0..0be2b6cf208d 100644 --- a/language/v1/language_sentiment_text.py +++ b/language/v1/language_sentiment_text.py @@ -26,8 +26,6 @@ # [START language_sentiment_text] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_sentiment(text_content): """ @@ -42,7 +40,7 @@ def sample_analyze_sentiment(text_content): # text_content = 'I am so happy and joyful.' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. 
# For list of supported languages: @@ -51,9 +49,9 @@ def sample_analyze_sentiment(text_content): document = {"content": text_content, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_sentiment(document, encoding_type=encoding_type) + response = client.analyze_sentiment(request = {'document': document, 'encoding_type': encoding_type}) # Get overall sentiment of the input document print(u"Document sentiment score: {}".format(response.document_sentiment.score)) print( diff --git a/language/v1/language_syntax_gcs.py b/language/v1/language_syntax_gcs.py index 32bf2acb589e..e04be4064549 100644 --- a/language/v1/language_syntax_gcs.py +++ b/language/v1/language_syntax_gcs.py @@ -26,8 +26,6 @@ # [START language_syntax_gcs] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_syntax(gcs_content_uri): """ @@ -43,7 +41,7 @@ def sample_analyze_syntax(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/syntax-sentence.txt' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -52,9 +50,9 @@ def sample_analyze_syntax(gcs_content_uri): document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_syntax(document, encoding_type=encoding_type) + response = client.analyze_syntax(request = {'document': document, 'encoding_type': encoding_type}) # Loop through tokens returned from the API for token in response.tokens: # Get the text content of this token. Usually a word or punctuation. @@ -70,13 +68,13 @@ def sample_analyze_syntax(gcs_content_uri): # Get the tag, e.g. NOUN, ADJ for Adjective, et al. print( u"Part of Speech tag: {}".format( - enums.PartOfSpeech.Tag(part_of_speech.tag).name + language_v1.PartOfSpeech.Tag(part_of_speech.tag).name ) ) # Get the voice, e.g. ACTIVE or PASSIVE - print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name)) + print(u"Voice: {}".format(language_v1.PartOfSpeech.Voice(part_of_speech.voice).name)) # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. - print(u"Tense: {}".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name)) + print(u"Tense: {}".format(language_v1.PartOfSpeech.Tense(part_of_speech.tense).name)) # See API reference for additional Part of Speech information available # Get the lemma of the token. 
Wikipedia lemma description # https://en.wikipedia.org/wiki/Lemma_(morphology) @@ -87,7 +85,7 @@ def sample_analyze_syntax(gcs_content_uri): dependency_edge = token.dependency_edge print(u"Head token index: {}".format(dependency_edge.head_token_index)) print( - u"Label: {}".format(enums.DependencyEdge.Label(dependency_edge.label).name) + u"Label: {}".format(language_v1.DependencyEdge.Label(dependency_edge.label).name) ) # Get the language of the text, which will be the same as diff --git a/language/v1/language_syntax_text.py b/language/v1/language_syntax_text.py index 290418864675..9f37e92ce7e5 100644 --- a/language/v1/language_syntax_text.py +++ b/language/v1/language_syntax_text.py @@ -26,8 +26,6 @@ # [START language_syntax_text] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_syntax(text_content): """ @@ -42,7 +40,7 @@ def sample_analyze_syntax(text_content): # text_content = 'This is a short sentence.' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -51,9 +49,9 @@ def sample_analyze_syntax(text_content): document = {"content": text_content, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_syntax(document, encoding_type=encoding_type) + response = client.analyze_syntax(request = {'document': document, 'encoding_type': encoding_type}) # Loop through tokens returned from the API for token in response.tokens: # Get the text content of this token. Usually a word or punctuation. @@ -69,13 +67,13 @@ def sample_analyze_syntax(text_content): # Get the tag, e.g. NOUN, ADJ for Adjective, et al. print( u"Part of Speech tag: {}".format( - enums.PartOfSpeech.Tag(part_of_speech.tag).name + language_v1.PartOfSpeech.Tag(part_of_speech.tag).name ) ) # Get the voice, e.g. ACTIVE or PASSIVE - print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name)) + print(u"Voice: {}".format(language_v1.PartOfSpeech.Voice(part_of_speech.voice).name)) # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. - print(u"Tense: {}".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name)) + print(u"Tense: {}".format(language_v1.PartOfSpeech.Tense(part_of_speech.tense).name)) # See API reference for additional Part of Speech information available # Get the lemma of the token. 
Wikipedia lemma description # https://en.wikipedia.org/wiki/Lemma_(morphology) @@ -86,7 +84,7 @@ def sample_analyze_syntax(text_content): dependency_edge = token.dependency_edge print(u"Head token index: {}".format(dependency_edge.head_token_index)) print( - u"Label: {}".format(enums.DependencyEdge.Label(dependency_edge.label).name) + u"Label: {}".format(language_v1.DependencyEdge.Label(dependency_edge.label).name) ) # Get the language of the text, which will be the same as From bd3f41b7f1d7d872c35e9e8f3b5e5a5d6968040e Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 28 Oct 2020 19:18:29 +0100 Subject: [PATCH 191/323] chore(deps): update dependency google-auth to v1.22.1 (#45) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 72a261b14e37..be79e75b835f 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.12.2 -google-auth==1.21.3 +google-auth==1.22.1 google-auth-httplib2==0.0.4 From 05c440248e9243f14aac74af570d285b888ee83f Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Tue, 3 Nov 2020 12:53:28 -0800 Subject: [PATCH 192/323] fix: adds underscore to "type" to NL API samples (#49) * fix: adds underscore to "type" in entity sentiment sample * fix: other language samples missing type with underscore --- language/v1/language_classify_gcs.py | 2 +- language/v1/language_classify_text.py | 2 +- language/v1/language_entities_gcs.py | 8 ++++---- language/v1/language_entities_text.py | 6 +++--- language/v1/language_entity_sentiment_gcs.py | 6 +++--- language/v1/language_entity_sentiment_text.py | 6 +++--- language/v1/language_sentiment_gcs.py | 2 +- language/v1/language_sentiment_text.py | 2 +- language/v1/language_syntax_gcs.py | 2 +- language/v1/language_syntax_text.py | 2 +- 10 files changed, 19 insertions(+), 19 deletions(-) diff --git a/language/v1/language_classify_gcs.py b/language/v1/language_classify_gcs.py index a20789cc8af7..b357a8aed07e 100644 --- a/language/v1/language_classify_gcs.py +++ b/language/v1/language_classify_gcs.py @@ -48,7 +48,7 @@ def sample_classify_text(gcs_content_uri): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language} response = client.classify_text(request = {'document': document}) # Loop through classified categories returned from the API diff --git a/language/v1/language_classify_text.py b/language/v1/language_classify_text.py index ad55d26cd110..6fe2aaa4b60d 100644 --- a/language/v1/language_classify_text.py +++ b/language/v1/language_classify_text.py @@ -46,7 +46,7 @@ def sample_classify_text(text_content): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"content": text_content, "type": type_, "language": language} + document = {"content": text_content, "type_": type_, "language": language} response = client.classify_text(request = {'document': document}) # Loop through classified categories returned from the API diff --git a/language/v1/language_entities_gcs.py b/language/v1/language_entities_gcs.py index d735e885dc7c..6bdb85772936 100644 --- a/language/v1/language_entities_gcs.py +++ 
b/language/v1/language_entities_gcs.py @@ -47,17 +47,17 @@ def sample_analyze_entities(gcs_content_uri): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = language_v1..EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type}) # Loop through entitites returned from the API for entity in response.entities: print(u"Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name)) + print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name)) # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) # Loop over the metadata associated with entity. For many known entities, @@ -73,7 +73,7 @@ def sample_analyze_entities(gcs_content_uri): print(u"Mention text: {}".format(mention.text.content)) # Get the mention type, e.g. PROPER for proper noun print( - u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name) + u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name) ) # Get the language of the text, which will be the same as diff --git a/language/v1/language_entities_text.py b/language/v1/language_entities_text.py index db2ad9e2d7c2..2cce0015d04b 100644 --- a/language/v1/language_entities_text.py +++ b/language/v1/language_entities_text.py @@ -46,7 +46,7 @@ def sample_analyze_entities(text_content): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"content": text_content, "type": type_, "language": language} + document = {"content": text_content, "type_": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 @@ -58,7 +58,7 @@ def sample_analyze_entities(text_content): print(u"Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name)) + print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name)) # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) @@ -77,7 +77,7 @@ def sample_analyze_entities(text_content): # Get the mention type, e.g. 
PROPER for proper noun print( - u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name) + u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name) ) # Get the language of the text, which will be the same as diff --git a/language/v1/language_entity_sentiment_gcs.py b/language/v1/language_entity_sentiment_gcs.py index 2a4c6ff368c1..dba3dc1bb76a 100644 --- a/language/v1/language_entity_sentiment_gcs.py +++ b/language/v1/language_entity_sentiment_gcs.py @@ -47,7 +47,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 @@ -57,7 +57,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri): for entity in response.entities: print(u"Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name)) + print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name)) # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) # Get the aggregate sentiment expressed for this entity in the provided document. @@ -77,7 +77,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri): print(u"Mention text: {}".format(mention.text.content)) # Get the mention type, e.g. PROPER for proper noun print( - u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name) + u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name) ) # Get the language of the text, which will be the same as diff --git a/language/v1/language_entity_sentiment_text.py b/language/v1/language_entity_sentiment_text.py index 20c9dbd83472..b28434df9c7a 100644 --- a/language/v1/language_entity_sentiment_text.py +++ b/language/v1/language_entity_sentiment_text.py @@ -46,7 +46,7 @@ def sample_analyze_entity_sentiment(text_content): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"content": text_content, "type": type_, "language": language} + document = {"content": text_content, "type_": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 @@ -56,7 +56,7 @@ def sample_analyze_entity_sentiment(text_content): for entity in response.entities: print(u"Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name)) + print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name)) # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) # Get the aggregate sentiment expressed for this entity in the provided document. @@ -76,7 +76,7 @@ def sample_analyze_entity_sentiment(text_content): print(u"Mention text: {}".format(mention.text.content)) # Get the mention type, e.g. 
PROPER for proper noun print( - u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name) + u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name) ) # Get the language of the text, which will be the same as diff --git a/language/v1/language_sentiment_gcs.py b/language/v1/language_sentiment_gcs.py index 68839805cc1f..f225db1c022d 100644 --- a/language/v1/language_sentiment_gcs.py +++ b/language/v1/language_sentiment_gcs.py @@ -47,7 +47,7 @@ def sample_analyze_sentiment(gcs_content_uri): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 diff --git a/language/v1/language_sentiment_text.py b/language/v1/language_sentiment_text.py index 0be2b6cf208d..d94420a39277 100644 --- a/language/v1/language_sentiment_text.py +++ b/language/v1/language_sentiment_text.py @@ -46,7 +46,7 @@ def sample_analyze_sentiment(text_content): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"content": text_content, "type": type_, "language": language} + document = {"content": text_content, "type_": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 diff --git a/language/v1/language_syntax_gcs.py b/language/v1/language_syntax_gcs.py index e04be4064549..4e8a5cc45bfe 100644 --- a/language/v1/language_syntax_gcs.py +++ b/language/v1/language_syntax_gcs.py @@ -47,7 +47,7 @@ def sample_analyze_syntax(gcs_content_uri): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 diff --git a/language/v1/language_syntax_text.py b/language/v1/language_syntax_text.py index 9f37e92ce7e5..c3eb9383cf6f 100644 --- a/language/v1/language_syntax_text.py +++ b/language/v1/language_syntax_text.py @@ -46,7 +46,7 @@ def sample_analyze_syntax(text_content): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"content": text_content, "type": type_, "language": language} + document = {"content": text_content, "type_": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 From 66091a15c292e86ba56f379dbbf0fdb5684d3a1d Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 11 Nov 2020 20:57:38 +0100 Subject: [PATCH 193/323] chore(deps): update dependency google-api-python-client to v1.12.5 (#44) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index be79e75b835f..347f8cb7bfa9 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.12.2 +google-api-python-client==1.12.5 google-auth==1.22.1 google-auth-httplib2==0.0.4 From 38a7893d34a979c0959320603acde62857a01646 Mon 
Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 11 Nov 2020 22:31:38 +0100 Subject: [PATCH 194/323] chore(deps): update dependency google-cloud-language to v2 (#48) --- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index de040ee00ca4..fc7f0cb6da2a 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==1.3.0 +google-cloud-language==2.0.0 numpy==1.19.2 diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 0c011f546e87..83a8cba4735e 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.3.0 +google-cloud-language==2.0.0 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index 0c011f546e87..83a8cba4735e 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.3.0 +google-cloud-language==2.0.0 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 0c011f546e87..83a8cba4735e 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.3.0 +google-cloud-language==2.0.0 From f80bd636cb8d70fbd522771562666b291bd97de0 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 12 Nov 2020 19:13:25 +0100 Subject: [PATCH 195/323] chore(deps): update dependency google-auth to v1.23.0 (#52) Co-authored-by: Takashi Matsuo --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 347f8cb7bfa9..422e819d9a76 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.12.5 -google-auth==1.22.1 +google-auth==1.23.0 google-auth-httplib2==0.0.4 From 7291c06106db9c25cf6a4b369b2041b78de17fa9 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 13 Nov 2020 01:33:32 +0100 Subject: [PATCH 196/323] chore(deps): update dependency numpy to v1.19.4 (#53) Co-authored-by: Dina Graves Portman --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index fc7f0cb6da2a..d1ae7cf21a09 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==2.0.0 -numpy==1.19.2 +numpy==1.19.4 \ No newline at end of file From e3409759574351af620ce36c7309350dde32c3a6 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 25 Nov 2020 20:31:03 +0100 Subject: [PATCH 197/323] chore(deps): update dependency google-api-python-client to v1.12.8 (#54) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 422e819d9a76..0026146b0dba 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.12.5 +google-api-python-client==1.12.8 google-auth==1.23.0 google-auth-httplib2==0.0.4 From 191191c2459d5e337283e56086b28c3d87f23e9a Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 28 Dec 2020 09:14:31 -0800 Subject: [PATCH 198/323] chore: update templates (#56) * changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. * chore(python): use 'setup.py' to detect repo root Closes #792 Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Fri Oct 9 15:06:33 2020 -0600 Source-Repo: googleapis/synthtool Source-Sha: e0ae456852bf22f38796deb79cff30b516fde244 Source-Link: https://github.com/googleapis/synthtool/commit/e0ae456852bf22f38796deb79cff30b516fde244 * build(python): samples tests should pass if no samples exist Source-Author: Daniel Sanche Source-Date: Wed Oct 14 08:00:06 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: 477764cc4ee6db346d3febef2bb1ea0abf27de52 Source-Link: https://github.com/googleapis/synthtool/commit/477764cc4ee6db346d3febef2bb1ea0abf27de52 * chore(python_library): change the docs bucket name Source-Author: Takashi Matsuo Source-Date: Fri Oct 16 09:58:05 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: da5c6050d13b4950c82666a81d8acd25157664ae Source-Link: https://github.com/googleapis/synthtool/commit/da5c6050d13b4950c82666a81d8acd25157664ae * chore(docs): update code of conduct of synthtool and templates Source-Author: Christopher Wilcox Source-Date: Thu Oct 22 14:22:01 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: 5f6ef0ec5501d33c4667885b37a7685a30d41a76 Source-Link: https://github.com/googleapis/synthtool/commit/5f6ef0ec5501d33c4667885b37a7685a30d41a76 * docs: add proto-plus to intersphinx mapping Source-Author: Tim Swast Source-Date: Tue Oct 27 12:01:14 2020 -0500 Source-Repo: googleapis/synthtool Source-Sha: ea52b8a0bd560f72f376efcf45197fb7c8869120 Source-Link: https://github.com/googleapis/synthtool/commit/ea52b8a0bd560f72f376efcf45197fb7c8869120 * fix(python_library): fix external unit test dependencies I recently submitted https://github.com/googleapis/synthtool/pull/811/files, allowing external dependencies for unit tests. This fixes a small missing comma bug Source-Author: Daniel Sanche Source-Date: Thu Oct 29 16:58:01 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: 6542bd723403513626f61642fc02ddca528409aa Source-Link: https://github.com/googleapis/synthtool/commit/6542bd723403513626f61642fc02ddca528409aa * chore: add type hint check Source-Author: Leah E. Cole <6719667+leahecole@users.noreply.github.com> Source-Date: Wed Nov 4 17:36:32 2020 -0800 Source-Repo: googleapis/synthtool Source-Sha: 3d3e94c4e02370f307a9a200b0c743c3d8d19f29 Source-Link: https://github.com/googleapis/synthtool/commit/3d3e94c4e02370f307a9a200b0c743c3d8d19f29 * chore: add blacken to template Source-Author: Leah E. 
Cole <6719667+leahecole@users.noreply.github.com> Source-Date: Thu Nov 5 15:22:03 2020 -0800 Source-Repo: googleapis/synthtool Source-Sha: 1f1148d3c7a7a52f0c98077f976bd9b3c948ee2b Source-Link: https://github.com/googleapis/synthtool/commit/1f1148d3c7a7a52f0c98077f976bd9b3c948ee2b * fix: address lint issues Source-Author: Leah E. Cole <6719667+leahecole@users.noreply.github.com> Source-Date: Thu Nov 12 11:30:49 2020 -0800 Source-Repo: googleapis/synthtool Source-Sha: e89175cf074dccc4babb4eca66ae913696e47a71 Source-Link: https://github.com/googleapis/synthtool/commit/e89175cf074dccc4babb4eca66ae913696e47a71 * docs(python): update intersphinx for grpc and auth * docs(python): update intersphinx for grpc and auth * use https for python intersphinx Co-authored-by: Tim Swast Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Wed Nov 18 14:37:25 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: 9a7d9fbb7045c34c9d3d22c1ff766eeae51f04c9 Source-Link: https://github.com/googleapis/synthtool/commit/9a7d9fbb7045c34c9d3d22c1ff766eeae51f04c9 * docs(python): fix intersphinx link for google-auth Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Thu Nov 19 10:16:05 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: a073c873f3928c561bdf87fdfbf1d081d1998984 Source-Link: https://github.com/googleapis/synthtool/commit/a073c873f3928c561bdf87fdfbf1d081d1998984 Co-authored-by: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> --- language/snippets/api/noxfile.py | 24 ++++++++++++++++++- language/snippets/classify_text/noxfile.py | 24 ++++++++++++++++++- language/snippets/cloud-client/v1/noxfile.py | 24 ++++++++++++++++++- .../snippets/generated-samples/v1/noxfile.py | 24 ++++++++++++++++++- language/snippets/sentiment/noxfile.py | 24 ++++++++++++++++++- 5 files changed, 115 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index ba55d7ce53ca..b90eef00f2d9 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -39,6 +39,10 @@ # You can opt out from the test for specific Python versions. 'ignored_versions': ["2.7"], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + 'enforce_type_hints': False, + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string @@ -132,7 +136,10 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - session.install("flake8", "flake8-import-order") + if not TEST_CONFIG['enforce_type_hints']: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ @@ -141,8 +148,18 @@ def lint(session): "." 
] session.run("flake8", *args) +# +# Black +# +@nox.session +def blacken(session): + session.install("black") + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + # # Sample Tests # @@ -201,6 +218,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index ba55d7ce53ca..b90eef00f2d9 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -39,6 +39,10 @@ # You can opt out from the test for specific Python versions. 'ignored_versions': ["2.7"], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + 'enforce_type_hints': False, + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string @@ -132,7 +136,10 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - session.install("flake8", "flake8-import-order") + if not TEST_CONFIG['enforce_type_hints']: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ @@ -141,8 +148,18 @@ def lint(session): "." ] session.run("flake8", *args) +# +# Black +# +@nox.session +def blacken(session): + session.install("black") + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + # # Sample Tests # @@ -201,6 +218,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index ba55d7ce53ca..b90eef00f2d9 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -39,6 +39,10 @@ # You can opt out from the test for specific Python versions. 'ignored_versions': ["2.7"], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + 'enforce_type_hints': False, + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string @@ -132,7 +136,10 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - session.install("flake8", "flake8-import-order") + if not TEST_CONFIG['enforce_type_hints']: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ @@ -141,8 +148,18 @@ def lint(session): "." 
] session.run("flake8", *args) +# +# Black +# +@nox.session +def blacken(session): + session.install("black") + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + # # Sample Tests # @@ -201,6 +218,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index ba55d7ce53ca..b90eef00f2d9 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -39,6 +39,10 @@ # You can opt out from the test for specific Python versions. 'ignored_versions': ["2.7"], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + 'enforce_type_hints': False, + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string @@ -132,7 +136,10 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - session.install("flake8", "flake8-import-order") + if not TEST_CONFIG['enforce_type_hints']: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ @@ -141,8 +148,18 @@ def lint(session): "." ] session.run("flake8", *args) +# +# Black +# +@nox.session +def blacken(session): + session.install("black") + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + # # Sample Tests # @@ -201,6 +218,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index ba55d7ce53ca..b90eef00f2d9 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -39,6 +39,10 @@ # You can opt out from the test for specific Python versions. 'ignored_versions': ["2.7"], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + 'enforce_type_hints': False, + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. 
You can also use your own string @@ -132,7 +136,10 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - session.install("flake8", "flake8-import-order") + if not TEST_CONFIG['enforce_type_hints']: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ @@ -141,8 +148,18 @@ def lint(session): "." ] session.run("flake8", *args) +# +# Black +# +@nox.session +def blacken(session): + session.install("black") + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + # # Sample Tests # @@ -201,6 +218,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") From d20843be7c8ed3f9652c8aa6e1fbb4b7201e503b Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 13 Jan 2021 20:05:43 +0100 Subject: [PATCH 199/323] chore(deps): update dependency numpy to v1.19.5 (#67) --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index d1ae7cf21a09..1bcdb3e49eed 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==2.0.0 -numpy==1.19.4 \ No newline at end of file +numpy==1.19.5 \ No newline at end of file From bff86e4a7d42670f3268ac02678db26e74e3cb7e Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 13 Jan 2021 20:14:03 +0100 Subject: [PATCH 200/323] chore(deps): update dependency google-auth to v1.24.0 (#65) [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | minor | `==1.23.0` -> `==1.24.0` | --- ### Release Notes
googleapis/google-auth-library-python ### [`v1.24.0`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#​1240-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1230v1240-2020-12-11) [Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.23.0...v1.24.0) ##### Features - add Python 3.9 support, drop Python 3.5 support ([#​655](https://www.github.com/googleapis/google-auth-library-python/issues/655)) ([6de753d](https://www.github.com/googleapis/google-auth-library-python/commit/6de753d585254c813b3e6cbde27bf5466261ba10)), closes [#​654](https://www.github.com/googleapis/google-auth-library-python/issues/654) ##### Bug Fixes - avoid losing the original '\_include_email' parameter in impersonated credentials ([#​626](https://www.github.com/googleapis/google-auth-library-python/issues/626)) ([fd9b5b1](https://www.github.com/googleapis/google-auth-library-python/commit/fd9b5b10c80950784bd37ee56e32c505acb5078d)) ##### Documentation - fix typo in import ([#​651](https://www.github.com/googleapis/google-auth-library-python/issues/651)) ([3319ea8](https://www.github.com/googleapis/google-auth-library-python/commit/3319ea8ae876c73a94f51237b3bbb3f5df2aef89)), closes [#​650](https://www.github.com/googleapis/google-auth-library-python/issues/650)
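Not part of the Renovate PR body: a small, hedged sketch of how a google-auth bump like this one can be smoke-tested locally before running the samples. It assumes Application Default Credentials are configured in the environment (for example via `GOOGLE_APPLICATION_CREDENTIALS`); `google.auth.default()` and `google.auth.__version__` are the only APIs used.

```python
# Hedged sketch, not from the PR: verify the upgraded google-auth still
# resolves Application Default Credentials before running the samples.
import google.auth

# Raises google.auth.exceptions.DefaultCredentialsError when no
# credentials are configured in the environment.
credentials, project_id = google.auth.default()

print("google-auth version:", google.auth.__version__)
print("credentials type:", type(credentials).__name__)
print("resolved project:", project_id)
```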
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 0026146b0dba..db6d88d76662 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.12.8 -google-auth==1.23.0 +google-auth==1.24.0 google-auth-httplib2==0.0.4 From 57306c65c5c4fe98e26ed2c7679ba807be40d14e Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 10 Feb 2021 19:19:56 +0100 Subject: [PATCH 201/323] chore(deps): update dependency google-auth to v1.25.0 (#73) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index db6d88d76662..8dea16e29f9a 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.12.8 -google-auth==1.24.0 +google-auth==1.25.0 google-auth-httplib2==0.0.4 From 144c62951dfdc0484ec4f32300e51ba197f69645 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 12 Feb 2021 00:14:28 +0100 Subject: [PATCH 202/323] chore(deps): update dependency google-auth to v1.26.1 (#75) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 8dea16e29f9a..575e9508dec7 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.12.8 -google-auth==1.25.0 +google-auth==1.26.1 google-auth-httplib2==0.0.4 From 796153fa69ba7617c0c16a23ca14c28e82d2d45e Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 12 Feb 2021 07:40:02 +0100 Subject: [PATCH 203/323] chore(deps): update dependency numpy to v1.20.1 (#74) [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [numpy](https://www.numpy.org) ([source](https://togithub.com/numpy/numpy)) | `==1.19.5` -> `==1.20.1` | [![age](https://badges.renovateapi.com/packages/pypi/numpy/1.20.1/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/numpy/1.20.1/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/numpy/1.20.1/compatibility-slim/1.19.5)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/numpy/1.20.1/confidence-slim/1.19.5)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
numpy/numpy ### [`v1.20.1`](https://togithub.com/numpy/numpy/releases/v1.20.1) [Compare Source](https://togithub.com/numpy/numpy/compare/v1.20.0...v1.20.1)

# NumPy 1.20.1 Release Notes

NumPy 1.20.1 is a rapid bugfix release fixing several bugs and regressions reported after the 1.20.0 release.

## Highlights

- The distutils bug that caused problems with downstream projects is fixed.
- The `random.shuffle` regression is fixed.

## Contributors

A total of 8 people contributed to this release. People with a "+" by their names contributed a patch for the first time.

- Bas van Beek
- Charles Harris
- Nicholas McKibben +
- Pearu Peterson
- Ralf Gommers
- Sebastian Berg
- Tyler Reddy
- [@Aerysv](https://togithub.com/Aerysv) +

## Pull requests merged

A total of 15 pull requests were merged for this release.

- [#18306](https://togithub.com/numpy/numpy/pull/18306): MAINT: Add missing placeholder annotations
- [#18310](https://togithub.com/numpy/numpy/pull/18310): BUG: Fix typo in `numpy.__init__.py`
- [#18326](https://togithub.com/numpy/numpy/pull/18326): BUG: don't mutate list of fake libraries while iterating over...
- [#18327](https://togithub.com/numpy/numpy/pull/18327): MAINT: gracefully shuffle memoryviews
- [#18328](https://togithub.com/numpy/numpy/pull/18328): BUG: Use C linkage for random distributions
- [#18336](https://togithub.com/numpy/numpy/pull/18336): CI: fix when GitHub Actions builds trigger, and allow ci skips
- [#18337](https://togithub.com/numpy/numpy/pull/18337): BUG: Allow unmodified use of isclose, allclose, etc. with timedelta
- [#18345](https://togithub.com/numpy/numpy/pull/18345): BUG: Allow pickling all relevant DType types/classes
- [#18351](https://togithub.com/numpy/numpy/pull/18351): BUG: Fix missing signed_char dependency. Closes #18335.
- [#18352](https://togithub.com/numpy/numpy/pull/18352): DOC: Change license date 2020 -> 2021
- [#18353](https://togithub.com/numpy/numpy/pull/18353): CI: CircleCI seems to occasionally time out, increase the limit
- [#18354](https://togithub.com/numpy/numpy/pull/18354): BUG: Fix f2py bugs when wrapping F90 subroutines.
- [#18356](https://togithub.com/numpy/numpy/pull/18356): MAINT: crackfortran regex simplify
- [#18357](https://togithub.com/numpy/numpy/pull/18357): BUG: threads.h existence test requires GLIBC > 2.12.
- [#18359](https://togithub.com/numpy/numpy/pull/18359): REL: Prepare for the NumPy 1.20.1 release.
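For context on why this numpy bump is manageable for this repository: the classify_text sample pinned by this PR uses numpy only for small vector operations when comparing document category vectors. A minimal, hedged sketch of that kind of computation follows; the `cosine_similarity` helper is illustrative, not code from the sample.

```python
# Illustrative sketch (not from the repository): the classify_text sample
# relies on numpy for small vector math such as this cosine similarity.
import numpy as np

def cosine_similarity(a, b):
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    # Guard against zero vectors to avoid division by zero.
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    return float(np.dot(a, b) / denom) if denom else 0.0

print(cosine_similarity([1.0, 0.0, 1.0], [1.0, 1.0, 0.0]))  # 0.5
```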
## Checksums ##### MD5 c4748f4f8f703c5e96027407eca02b08 numpy-1.20.1-cp37-cp37m-macosx_10_9_x86_64.whl f0bf3a78d6b3a169e5a7fb2637f7fd87 numpy-1.20.1-cp37-cp37m-manylinux1_i686.whl 493c17647c05ca5043bcbab1ac266a74 numpy-1.20.1-cp37-cp37m-manylinux1_x86_64.whl 55ec954fc598c72b2bbf57bfa8b2a701 numpy-1.20.1-cp37-cp37m-manylinux2010_i686.whl 8cee88f9683d208686081522609a8726 numpy-1.20.1-cp37-cp37m-manylinux2010_x86_64.whl 26399d3ededc53b354de78f977a6197e numpy-1.20.1-cp37-cp37m-manylinux2014_aarch64.whl 81051f1e7a79eea8a5aaf5718114ce3a numpy-1.20.1-cp37-cp37m-win32.whl 899488c55824f02a7a6f0451fc86f63f numpy-1.20.1-cp37-cp37m-win_amd64.whl 17f4dae5a0d143b46345a9cf1a8c8dec numpy-1.20.1-cp38-cp38-macosx_10_9_x86_64.whl f254e98e92b3054c567b6220b37b81d3 numpy-1.20.1-cp38-cp38-manylinux1_i686.whl 483f43a62c7e32ae991990786da90de1 numpy-1.20.1-cp38-cp38-manylinux1_x86_64.whl bf578b783e36d3feb3344973306a9f96 numpy-1.20.1-cp38-cp38-manylinux2010_i686.whl f5d6c77c898537017e64ee30b243fdca numpy-1.20.1-cp38-cp38-manylinux2010_x86_64.whl 5cf541a0d5af3d5812d2970a427075fb numpy-1.20.1-cp38-cp38-manylinux2014_aarch64.whl 178315c579c0a70285b8ee502eb498af numpy-1.20.1-cp38-cp38-win32.whl 5164a32e7a00a2b285302b563eb58afe numpy-1.20.1-cp38-cp38-win_amd64.whl c123dd10788ea9ff788d735cbee444c5 numpy-1.20.1-cp39-cp39-macosx_10_9_x86_64.whl 72282fefe58650c6e7cc41f5b37b8662 numpy-1.20.1-cp39-cp39-manylinux2010_i686.whl 234d57c1a7b1f8b99c054a7a71a51cbe numpy-1.20.1-cp39-cp39-manylinux2010_x86_64.whl 352243d4285970e45d825024ca566d47 numpy-1.20.1-cp39-cp39-manylinux2014_aarch64.whl a78c863323e0f56210c2e1acaad1bc22 numpy-1.20.1-cp39-cp39-win32.whl 86f9d3f358e7d7896e713bce99f17fdd numpy-1.20.1-cp39-cp39-win_amd64.whl ed2c81132119fb3c7f73c6a2de306058 numpy-1.20.1-pp37-pypy37_pp73-manylinux2010_x86_64.whl 60a5e2517be19394a7df24f6d4add3f2 numpy-1.20.1.tar.gz 30ea1c7868e73eeff2c86ac465311220 numpy-1.20.1.zip ##### SHA256 ae61f02b84a0211abb56462a3b6cd1e7ec39d466d3160eb4e1da8bf6717cdbeb numpy-1.20.1-cp37-cp37m-macosx_10_9_x86_64.whl 65410c7f4398a0047eea5cca9b74009ea61178efd78d1be9847fac1d6716ec1e numpy-1.20.1-cp37-cp37m-manylinux1_i686.whl 2d7e27442599104ee08f4faed56bb87c55f8b10a5494ac2ead5c98a4b289e61f numpy-1.20.1-cp37-cp37m-manylinux1_x86_64.whl 4ed8e96dc146e12c1c5cdd6fb9fd0757f2ba66048bf94c5126b7efebd12d0090 numpy-1.20.1-cp37-cp37m-manylinux2010_i686.whl ecb5b74c702358cdc21268ff4c37f7466357871f53a30e6f84c686952bef16a9 numpy-1.20.1-cp37-cp37m-manylinux2010_x86_64.whl b9410c0b6fed4a22554f072a86c361e417f0258838957b78bd063bde2c7f841f numpy-1.20.1-cp37-cp37m-manylinux2014_aarch64.whl 3d3087e24e354c18fb35c454026af3ed8997cfd4997765266897c68d724e4845 numpy-1.20.1-cp37-cp37m-win32.whl 89f937b13b8dd17b0099c7c2e22066883c86ca1575a975f754babc8fbf8d69a9 numpy-1.20.1-cp37-cp37m-win_amd64.whl a1d7995d1023335e67fb070b2fae6f5968f5be3802b15ad6d79d81ecaa014fe0 numpy-1.20.1-cp38-cp38-macosx_10_9_x86_64.whl 60759ab15c94dd0e1ed88241fd4fa3312db4e91d2c8f5a2d4cf3863fad83d65b numpy-1.20.1-cp38-cp38-manylinux1_i686.whl 125a0e10ddd99a874fd357bfa1b636cd58deb78ba4a30b5ddb09f645c3512e04 numpy-1.20.1-cp38-cp38-manylinux1_x86_64.whl c26287dfc888cf1e65181f39ea75e11f42ffc4f4529e5bd19add57ad458996e2 numpy-1.20.1-cp38-cp38-manylinux2010_i686.whl 7199109fa46277be503393be9250b983f325880766f847885607d9b13848f257 numpy-1.20.1-cp38-cp38-manylinux2010_x86_64.whl 72251e43ac426ff98ea802a931922c79b8d7596480300eb9f1b1e45e0543571e numpy-1.20.1-cp38-cp38-manylinux2014_aarch64.whl c91ec9569facd4757ade0888371eced2ecf49e7982ce5634cc2cf4e7331a4b14 numpy-1.20.1-cp38-cp38-win32.whl 
13adf545732bb23a796914fe5f891a12bd74cf3d2986eed7b7eba2941eea1590 numpy-1.20.1-cp38-cp38-win_amd64.whl 104f5e90b143dbf298361a99ac1af4cf59131218a045ebf4ee5990b83cff5fab numpy-1.20.1-cp39-cp39-macosx_10_9_x86_64.whl 89e5336f2bec0c726ac7e7cdae181b325a9c0ee24e604704ed830d241c5e47ff numpy-1.20.1-cp39-cp39-manylinux2010_i686.whl 032be656d89bbf786d743fee11d01ef318b0781281241997558fa7950028dd29 numpy-1.20.1-cp39-cp39-manylinux2010_x86_64.whl 66b467adfcf628f66ea4ac6430ded0614f5cc06ba530d09571ea404789064adc numpy-1.20.1-cp39-cp39-manylinux2014_aarch64.whl 12e4ba5c6420917571f1a5becc9338abbde71dd811ce40b37ba62dec7b39af6d numpy-1.20.1-cp39-cp39-win32.whl 9c94cab5054bad82a70b2e77741271790304651d584e2cdfe2041488e753863b numpy-1.20.1-cp39-cp39-win_amd64.whl 9eb551d122fadca7774b97db8a112b77231dcccda8e91a5bc99e79890797175e numpy-1.20.1-pp37-pypy37_pp73-manylinux2010_x86_64.whl 9bf51d69ebb4ca9239e55bedc2185fe2c0ec222da0adee7ece4125414676846d numpy-1.20.1.tar.gz 3bc63486a870294683980d76ec1e3efc786295ae00128f9ea38e2c6e74d5a60a numpy-1.20.1.zip ### [`v1.20.0`](https://togithub.com/numpy/numpy/releases/v1.20.0) [Compare Source](https://togithub.com/numpy/numpy/compare/v1.19.5...v1.20.0) # NumPy 1.20.0 Release Notes This NumPy release is the largest so made to date, some 684 PRs contributed by 184 people have been merged. See the list of highlights below for more details. The Python versions supported for this release are 3.7-3.9, support for Python 3.6 has been dropped. Highlights are - Annotations for NumPy functions. This work is ongoing and improvements can be expected pending feedback from users. - Wider use of SIMD to increase execution speed of ufuncs. Much work has been done in introducing universal functions that will ease use of modern features across different hardware platforms. This work is ongoing. - Preliminary work in changing the dtype and casting implementations in order to provide an easier path to extending dtypes. This work is ongoing but enough has been done to allow experimentation and feedback. - Extensive documentation improvements comprising some 185 PR merges. This work is ongoing and part of the larger project to improve NumPy\\'s online presence and usefulness to new users. - Further cleanups related to removing Python 2.7. This improves code readability and removes technical debt. - Preliminary support for the upcoming Cython 3.0. ## New functions ##### The random.Generator class has a new `permuted` function. The new function differs from `shuffle` and `permutation` in that the subarrays indexed by an axis are permuted rather than the axis being treated as a separate 1-D array for every combination of the other indexes. For example, it is now possible to permute the rows or columns of a 2-D array. ([gh-15121](https://togithub.com/numpy/numpy/pull/15121)) ##### `sliding_window_view` provides a sliding window view for numpy arrays `numpy.lib.stride\_tricks.sliding\_window\_view` constructs views on numpy arrays that offer a sliding or moving window access to the array. This allows for the simple implementation of certain algorithms, such as running means. ([gh-17394](https://togithub.com/numpy/numpy/pull/17394)) ##### [numpy.broadcast\_shapes]{.title-ref} is a new user-facing function `numpy.broadcast\_shapes` gets the resulting shape from broadcasting the given shape tuples against each other. 
```{.python}
>>> np.broadcast_shapes((1, 2), (3, 1))
(3, 2)

>>> np.broadcast_shapes(2, (3, 1))
(3, 2)

>>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7))
(5, 6, 7)
```

([gh-17535](https://togithub.com/numpy/numpy/pull/17535))

## Deprecations

##### Using the aliases of builtin types like `np.int` is deprecated

For a long time, `np.int` has been an alias of the builtin `int`. This is repeatedly a cause of confusion for newcomers, and existed mainly for historic reasons. These aliases have been deprecated. The table below shows the full list of deprecated aliases, along with their exact meaning. Replacing uses of items in the first column with the contents of the second column will work identically and silence the deprecation warning. The third column lists alternative NumPy names which may occasionally be preferential. See also `basics.types` for additional details.

| Deprecated name | Identical to | NumPy scalar type names |
| --------------- | ------------ | ----------------------- |
| `numpy.bool` | `bool` | `numpy.bool_` |
| `numpy.int` | `int` | `numpy.int_` (default), `numpy.int64`, or `numpy.int32` |
| `numpy.float` | `float` | `numpy.float64`, `numpy.float_`, `numpy.double` (equivalent) |
| `numpy.complex` | `complex` | `numpy.complex128`, `numpy.complex_`, `numpy.cdouble` (equivalent) |
| `numpy.object` | `object` | `numpy.object_` |
| `numpy.str` | `str` | `numpy.str_` |
| `numpy.long` | `int` | `numpy.int_` (C `long`), `numpy.longlong` (largest integer type) |
| `numpy.unicode` | `str` | `numpy.unicode_` |

To give a clear guideline for the vast majority of cases, for the types `bool`, `object`, `str` (and `unicode`) using the plain version is shorter and clear, and generally a good replacement. For `float` and `complex` you can use `float64` and `complex128` if you wish to be more explicit about the precision. For `np.int` a direct replacement with `np.int_` or `int` is also good and will not change behavior, but the precision will continue to depend on the computer and operating system. If you want to be more explicit and review the current use, you have the following alternatives:

- `np.int64` or `np.int32` to specify the precision exactly. This ensures that results cannot depend on the computer or operating system.
- `np.int_` or `int` (the default), but be aware that it depends on the computer and operating system.
- The C types: `np.cint` (int), `np.int_` (long), `np.longlong`.
- `np.intp` which is 32bit on 32bit machines and 64bit on 64bit machines. This can be the best type to use for indexing.

When used with `np.dtype(...)` or `dtype=...` changing it to the NumPy name as mentioned above will have no effect on the output. If used as a scalar with:

    np.float(123)

changing it can subtly change the result. In this case, the Python version `float(123)` or `int(12.)` is normally preferable, although the NumPy version may be useful for consistency with NumPy arrays (for example, NumPy behaves differently for things like division by zero).

([gh-14882](https://togithub.com/numpy/numpy/pull/14882))

##### Passing `shape=None` to functions with a non-optional shape argument is deprecated

Previously, this was an alias for passing `shape=()`. This deprecation is emitted by `PyArray_IntpConverter` in the C API. If your API is intended to support passing `None`, then you should check for `None` prior to invoking the converter, so as to be able to distinguish `None` and `()`.
([gh-15886](https://togithub.com/numpy/numpy/pull/15886)) ##### Indexing errors will be reported even when index result is empty In the future, NumPy will raise an IndexError when an integer array index contains out of bound values even if a non-indexed dimension is of length 0. This will now emit a DeprecationWarning. This can happen when the array is previously empty, or an empty slice is involved: arr1 = np.zeros((5, 0)) arr1[[20]] arr2 = np.zeros((5, 5)) arr2[[20], :0] Previously the non-empty index `[20]` was not checked for correctness. It will now be checked causing a deprecation warning which will be turned into an error. This also applies to assignments. ([gh-15900](https://togithub.com/numpy/numpy/pull/15900)) ##### Inexact matches for `mode` and `searchside` are deprecated Inexact and case insensitive matches for `mode` and `searchside` were valid inputs earlier and will give a DeprecationWarning now. For example, below are some example usages which are now deprecated and will give a DeprecationWarning: import numpy as np arr = np.array([[3, 6, 6], [4, 5, 1]]) ### mode: inexact match np.ravel_multi_index(arr, (7, 6), mode="clap") # should be "clip" ### searchside: inexact match np.searchsorted(arr[0], 4, side='random') # should be "right" ([gh-16056](https://togithub.com/numpy/numpy/pull/16056)) ##### Deprecation of [numpy.dual]{.title-ref} The module `numpy.dual` is deprecated. Instead of importing functions from `numpy.dual`, the functions should be imported directly from NumPy or SciPy. ([gh-16156](https://togithub.com/numpy/numpy/pull/16156)) ##### `outer` and `ufunc.outer` deprecated for matrix `np.matrix` use with `\~numpy.outer` or generic ufunc outer calls such as `numpy.add.outer`. Previously, matrix was converted to an array here. This will not be done in the future requiring a manual conversion to arrays. ([gh-16232](https://togithub.com/numpy/numpy/pull/16232)) ##### Further Numeric Style types Deprecated The remaining numeric-style type codes `Bytes0`, `Str0`, `Uint32`, `Uint64`, and `Datetime64` have been deprecated. The lower-case variants should be used instead. For bytes and string `"S"` and `"U"` are further alternatives. ([gh-16554](https://togithub.com/numpy/numpy/pull/16554)) ##### The `ndincr` method of `ndindex` is deprecated The documentation has warned against using this function since NumPy 1.8. Use `next(it)` instead of `it.ndincr()`. ([gh-17233](https://togithub.com/numpy/numpy/pull/17233)) ##### ArrayLike objects which do not define `__len__` and `__getitem__` Objects which define one of the protocols `__array__`, `__array_interface__`, or `__array_struct__` but are not sequences (usually defined by having a `__len__` and `__getitem__`) will behave differently during array-coercion in the future. When nested inside sequences, such as `np.array([array_like])`, these were handled as a single Python object rather than an array. In the future they will behave identically to: np.array([np.array(array_like)]) This change should only have an effect if `np.array(array_like)` is not 0-D. The solution to this warning may depend on the object: - Some array-likes may expect the new behaviour, and users can ignore the warning. The object can choose to expose the sequence protocol to opt-in to the new behaviour. - For example, `shapely` will allow conversion to an array-like using `line.coords` rather than `np.asarray(line)`. Users may work around the warning, or use the new convention when it becomes available. 
Unfortunately, using the new behaviour can only be achieved by calling `np.array(array_like)`. If you wish to ensure that the old behaviour remains unchanged, please create an object array and then fill it explicitly, for example:

    arr = np.empty(3, dtype=object)
    arr[:] = [array_like1, array_like2, array_like3]

This will ensure NumPy knows to not enter the array-like and use it as an object instead.

([gh-17973](https://togithub.com/numpy/numpy/pull/17973))

## Future Changes

##### Arrays cannot be using subarray dtypes

Array creation and casting using `np.array(arr, dtype)` and `arr.astype(dtype)` will use different logic when `dtype` is a subarray dtype such as `np.dtype("(2)i,")`. For such a `dtype` the following behaviour is true:

    res = np.array(arr, dtype)

    res.dtype is not dtype
    res.dtype is dtype.base
    res.shape == arr.shape + dtype.shape

But `res` is filled using the logic:

    res = np.empty(arr.shape + dtype.shape, dtype=dtype.base)
    res[...] = arr

which uses incorrect broadcasting (and often leads to an error). In the future, this will instead cast each element individually, leading to the same result as:

    res = np.array(arr, dtype=np.dtype(["f", dtype]))["f"]

which can normally be used to opt in to the new behaviour. This change does not affect `np.array(list, dtype="(2)i,")` unless the `list` itself includes at least one array. In particular, the behaviour is unchanged for a list of tuples.

([gh-17596](https://togithub.com/numpy/numpy/pull/17596))

## Expired deprecations

- The deprecation of numeric style type-codes `np.dtype("Complex64")` (with upper case spelling) is expired. `"Complex64"` corresponded to `"complex128"` and `"Complex32"` corresponded to `"complex64"`.
- The deprecation of `np.sctypeNA` and `np.typeNA` is expired. Both have been removed from the public API. Use `np.typeDict` instead. ([gh-16554](https://togithub.com/numpy/numpy/pull/16554))
- The 14-year deprecation of `np.ctypeslib.ctypes_load_library` is expired. Use `numpy.ctypeslib.load_library` instead, which is identical. ([gh-17116](https://togithub.com/numpy/numpy/pull/17116))

##### Financial functions removed

In accordance with NEP 32, the financial functions are removed from NumPy 1.20. The functions that have been removed are `fv`, `ipmt`, `irr`, `mirr`, `nper`, `npv`, `pmt`, `ppmt`, `pv`, and `rate`. These functions are available in the [numpy_financial](https://pypi.org/project/numpy-financial) library.

([gh-17067](https://togithub.com/numpy/numpy/pull/17067))

## Compatibility notes

##### Use `isinstance(dtype, np.dtype)`, not `type(dtype) is np.dtype`

NumPy dtypes are not direct instances of `np.dtype` anymore. Code that may have used `type(dtype) is np.dtype` will always return `False` and must be updated to use the correct version `isinstance(dtype, np.dtype)`. This change also affects the C-side macro `PyArray_DescrCheck` if compiled against a NumPy older than 1.16.6. If code uses this macro and wishes to compile against an older version of NumPy, it must replace the macro (see also [C API changes](#c-api-changes) section).

##### Same kind casting in concatenate with `axis=None`

When `numpy.concatenate` is called with `axis=None`, the flattened arrays were cast with `unsafe`. Any other axis choice uses "same kind". That different default has been deprecated and "same kind" casting will be used instead. The new `casting` keyword argument can be used to retain the old behaviour.
([gh-16134](https://togithub.com/numpy/numpy/pull/16134))

##### NumPy Scalars are cast when assigned to arrays

When creating or assigning to arrays, in all relevant cases NumPy scalars will now be cast identically to NumPy arrays. In particular this changes the behaviour in some cases which previously raised an error:

    np.array([np.float64(np.nan)], dtype=np.int64)

will succeed and return an undefined result (usually the smallest possible integer). This also affects assignments:

    arr[0] = np.float64(np.nan)

At this time, NumPy retains the behaviour for:

    np.array(np.float64(np.nan), dtype=np.int64)

The above changes do not affect Python scalars:

    np.array([float("NaN")], dtype=np.int64)

remains unaffected (`np.nan` is a Python `float`, not a NumPy one). Unlike signed integers, unsigned integers do not retain this special case, since they always behaved more like casting. The following code stops raising an error:

    np.array([np.float64(np.nan)], dtype=np.uint64)

To avoid backward compatibility issues, at this time assignment from `datetime64` scalar to strings of too short length remains supported. This means that `np.asarray(np.datetime64("2020-10-10"), dtype="S5")` succeeds now, when it failed before. In the long term this may be deprecated or the unsafe cast may be allowed generally to make assignment of arrays and scalars behave consistently.

##### Array coercion changes when Strings and other types are mixed

When strings and other types are mixed, such as:

    np.array(["string", np.float64(3.)], dtype="S")

The results will change, which may lead to string dtypes with longer strings in some cases. In particular, if `dtype="S"` is not provided, any numerical value will lead to a string result long enough to hold all possible numerical values (e.g. "S32" for floats). Note that you should always provide `dtype="S"` when converting non-strings to strings. If `dtype="S"` is provided the results will be largely identical to before, but NumPy scalars (not a Python float like `1.0`) will still enforce a uniform string length:

    np.array([np.float64(3.)], dtype="S")  # gives "S32"
    np.array([3.0], dtype="S")             # gives "S3"

Previously the first version gave the same result as the second.

##### Array coercion restructure

Array coercion has been restructured. In general, this should not affect users. In extremely rare corner cases where array-likes are nested:

    np.array([array_like1])

Things will now be more consistent with:

    np.array([np.array(array_like1)])

This can subtly change output for some badly defined array-likes. One example of this is array-like objects which are not also sequences of matching shape. In NumPy 1.20, a warning will be given when an array-like is not also a sequence (but behaviour remains identical, see deprecations). If an array like is also a sequence (defines `__getitem__` and `__len__`) NumPy will now only use the result given by `__array__`, `__array_interface__`, or `__array_struct__`. This will result in differences when the (nested) sequence describes a different shape.

([gh-16200](https://togithub.com/numpy/numpy/pull/16200))

##### Writing to the result of `numpy.broadcast_arrays` will export readonly buffers

In NumPy 1.17 `numpy.broadcast_arrays` started warning when the resulting array was written to. This warning was skipped when the array was used through the buffer interface (e.g. `memoryview(arr)`). The same thing will now occur for the two protocols `__array_interface__`, and `__array_struct__` returning read-only buffers instead of giving a warning.
([gh-16350](https://togithub.com/numpy/numpy/pull/16350))

##### Numeric-style type names have been removed from type dictionaries

To stay in sync with the deprecation for `np.dtype("Complex64")` and other numeric-style (capital case) types, these were removed from `np.sctypeDict` and `np.typeDict`. You should use the lower case versions instead. Note that `"Complex64"` corresponds to `"complex128"` and `"Complex32"` corresponds to `"complex64"`. The numpy style (new) versions denote the full size and not the size of the real/imaginary part.

([gh-16554](https://togithub.com/numpy/numpy/pull/16554))

##### The `operator.concat` function now raises TypeError for array arguments

The previous behavior was to fall back to addition and add the two arrays, which was thought to be unexpected behavior for a concatenation function.

([gh-16570](https://togithub.com/numpy/numpy/pull/16570))

##### `nickname` attribute removed from ABCPolyBase

An abstract property `nickname` has been removed from `ABCPolyBase` as it was no longer used in the derived convenience classes. This may affect users who have derived classes from `ABCPolyBase` and overridden the methods for representation and display, e.g. `__str__`, `__repr__`, `_repr_latex`, etc.

([gh-16589](https://togithub.com/numpy/numpy/pull/16589))

##### `float->timedelta` and `uint64->timedelta` promotion will raise a TypeError

Float and timedelta promotion consistently raises a TypeError. `np.promote_types("float32", "m8")` aligns with `np.promote_types("m8", "float32")` now and both raise a TypeError. Previously, `np.promote_types("float32", "m8")` returned `"m8"` which was considered a bug. Uint64 and timedelta promotion consistently raises a TypeError. `np.promote_types("uint64", "m8")` aligns with `np.promote_types("m8", "uint64")` now and both raise a TypeError. Previously, `np.promote_types("uint64", "m8")` returned `"m8"` which was considered a bug.

([gh-16592](https://togithub.com/numpy/numpy/pull/16592))

##### `numpy.genfromtxt` now correctly unpacks structured arrays

Previously, `numpy.genfromtxt` failed to unpack if it was called with `unpack=True` and a structured datatype was passed to the `dtype` argument (or `dtype=None` was passed and a structured datatype was inferred). For example:

    >>> data = StringIO("21 58.0\n35 72.0")
    >>> np.genfromtxt(data, dtype=None, unpack=True)
    array([(21, 58.), (35, 72.)], dtype=[('f0', '<i8'), ('f1', '<f8')])

Structured arrays will be unpacked along the last dimension of their containing array:

    >>> np.genfromtxt(data, dtype=None, unpack=True)
    [array([21, 35]), array([58., 72.])]

([gh-16650](https://togithub.com/numpy/numpy/pull/16650))

##### `mgrid`, `r_`, etc. consistently return correct outputs for non-default precision input

Previously, `np.mgrid[np.float32(0.1):np.float32(0.35):np.float32(0.1),]` and `np.r_[0:10:np.complex64(3j)]` failed to return meaningful output. This bug potentially affects `numpy.mgrid`, `numpy.ogrid`, `numpy.r_`, and `numpy.c_` when an input with dtype other than the default `float64` and `complex128` and equivalent Python types were used. The methods have been fixed to handle varying precision correctly.

([gh-16815](https://togithub.com/numpy/numpy/pull/16815))

##### Boolean array indices with mismatching shapes now properly give `IndexError`

Previously, if a boolean array index matched the size of the indexed array but not the shape, it was incorrectly allowed in some cases. In other cases, it gave an error, but the error was incorrectly a `ValueError` with a message about broadcasting instead of the correct `IndexError`.
For example, the following used to incorrectly give `ValueError: operands could not be broadcast together with shapes (2,2) (1,4)`: ```{.python} np.empty((2, 2))[np.array([[True, False, False, False]])] ``` And the following used to incorrectly return `array([], dtype=float64)`: ```{.python} np.empty((2, 2))[np.array([[False, False, False, False]])] ``` Both now correctly give `IndexError: boolean index did not match indexed array along dimension 0; dimension is 2 but corresponding boolean dimension is 1`. ([gh-17010](https://togithub.com/numpy/numpy/pull/17010)) ##### Casting errors interrupt Iteration When iterating while casting values, an error may stop the iteration earlier than before. In any case, a failed casting operation always returned undefined, partial results. Those may now be even more undefined and partial. For users of the `NpyIter` C-API such cast errors will now cause the [iternext()]{.title-ref} function to return 0 and thus abort iteration. Currently, there is no API to detect such an error directly. It is necessary to check `PyErr_Occurred()`, which may be problematic in combination with `NpyIter_Reset`. These issues always existed, but new API could be added if required by users. ([gh-17029](https://togithub.com/numpy/numpy/pull/17029)) ##### f2py generated code may return unicode instead of byte strings Some byte strings previously returned by f2py generated code may now be unicode strings. This results from the ongoing Python2 -> Python3 cleanup. ([gh-17068](https://togithub.com/numpy/numpy/pull/17068)) ##### The first element of the `__array_interface__["data"]` tuple must be an integer This has been the documented interface for many years, but there was still code that would accept a byte string representation of the pointer address. That code has been removed, passing the address as a byte string will now raise an error. ([gh-17241](https://togithub.com/numpy/numpy/pull/17241)) ##### poly1d respects the dtype of all-zero argument Previously, constructing an instance of `poly1d` with all-zero coefficients would cast the coefficients to `np.float64`. This affected the output dtype of methods which construct `poly1d` instances internally, such as `np.polymul`. ([gh-17577](https://togithub.com/numpy/numpy/pull/17577)) ##### The numpy.i file for swig is Python 3 only. Uses of Python 2.7 C-API functions have been updated to Python 3 only. Users who need the old version should take it from an older version of NumPy. ([gh-17580](https://togithub.com/numpy/numpy/pull/17580)) ##### Void dtype discovery in `np.array` In calls using `np.array(..., dtype="V")`, `arr.astype("V")`, and similar a TypeError will now be correctly raised unless all elements have the identical void length. An example for this is: np.array([b"1", b"12"], dtype="V") Which previously returned an array with dtype `"V2"` which cannot represent `b"1"` faithfully. ([gh-17706](https://togithub.com/numpy/numpy/pull/17706)) ## C API changes ##### The `PyArray_DescrCheck` macro is modified The `PyArray_DescrCheck` macro has been updated since NumPy 1.16.6 to be: #define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type) Starting with NumPy 1.20 code that is compiled against an earlier version will be API incompatible with NumPy 1.20. 
The fix is to either compile against 1.16.6 (if the NumPy 1.16 release is the oldest release you wish to support), or manually inline the macro by replacing it with the new definition: PyObject_TypeCheck(op, &PyArrayDescr_Type) which is compatible with all NumPy versions. ##### Size of `np.ndarray` and `np.void_` changed The size of the `PyArrayObject` and `PyVoidScalarObject` structures have changed. The following header definition has been removed: #define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) since the size must not be considered a compile time constant: it will change for different runtime versions of NumPy. The most likely relevant use are potential subclasses written in C which will have to be recompiled and should be updated. Please see the documentation for :c`PyArrayObject`{.interpreted-text role="type"} for more details and contact the NumPy developers if you are affected by this change. NumPy will attempt to give a graceful error but a program expecting a fixed structure size may have undefined behaviour and likely crash. ([gh-16938](https://togithub.com/numpy/numpy/pull/16938)) ## New Features ##### `where` keyword argument for `numpy.all` and `numpy.any` functions The keyword argument `where` is added and allows to only consider specified elements or subaxes from an array in the Boolean evaluation of `all` and `any`. This new keyword is available to the functions `all` and `any` both via `numpy` directly or in the methods of `numpy.ndarray`. Any broadcastable Boolean array or a scalar can be set as `where`. It defaults to `True` to evaluate the functions for all elements in an array if `where` is not set by the user. Examples are given in the documentation of the functions. ##### `where` keyword argument for `numpy` functions `mean`, `std`, `var` The keyword argument `where` is added and allows to limit the scope in the calculation of `mean`, `std` and `var` to only a subset of elements. It is available both via `numpy` directly or in the methods of `numpy.ndarray`. Any broadcastable Boolean array or a scalar can be set as `where`. It defaults to `True` to evaluate the functions for all elements in an array if `where` is not set by the user. Examples are given in the documentation of the functions. ([gh-15852](https://togithub.com/numpy/numpy/pull/15852)) ##### `norm=backward`, `forward` keyword options for `numpy.fft` functions The keyword argument option `norm=backward` is added as an alias for `None` and acts as the default option; using it has the direct transforms unscaled and the inverse transforms scaled by `1/n`. Using the new keyword argument option `norm=forward` has the direct transforms scaled by `1/n` and the inverse transforms unscaled (i.e. exactly opposite to the default option `norm=backward`). ([gh-16476](https://togithub.com/numpy/numpy/pull/16476)) ##### NumPy is now typed Type annotations have been added for large parts of NumPy. There is also a new [numpy.typing]{.title-ref} module that contains useful types for end-users. The currently available types are - `ArrayLike`: for objects that can be coerced to an array - `DtypeLike`: for objects that can be coerced to a dtype ([gh-16515](https://togithub.com/numpy/numpy/pull/16515)) ##### `numpy.typing` is accessible at runtime The types in `numpy.typing` can now be imported at runtime. 
Code like the following will now work: ```{.python} from numpy.typing import ArrayLike x: ArrayLike = [1, 2, 3, 4] ``` ([gh-16558](https://togithub.com/numpy/numpy/pull/16558)) ##### New `__f2py_numpy_version__` attribute for f2py generated modules. Because f2py is released together with NumPy, `__f2py_numpy_version__` provides a way to track the version f2py used to generate the module. ([gh-16594](https://togithub.com/numpy/numpy/pull/16594)) ##### `mypy` tests can be run via runtests.py Currently running mypy with the NumPy stubs configured requires either: - Installing NumPy - Adding the source directory to MYPYPATH and linking to the `mypy.ini` Both options are somewhat inconvenient, so add a `--mypy` option to runtests that handles setting things up for you. This will also be useful in the future for any typing codegen since it will ensure the project is built before type checking. ([gh-17123](https://togithub.com/numpy/numpy/pull/17123)) ##### Negation of user defined BLAS/LAPACK detection order [\~numpy.distutils]{.title-ref} allows negation of libraries when determining BLAS/LAPACK libraries. This may be used to remove an item from the library resolution phase, i.e. to disallow NetLIB libraries one could do: ```{.bash} NPY_BLAS_ORDER='^blas' NPY_LAPACK_ORDER='^lapack' python setup.py build ``` That will use any of the accelerated libraries instead. ([gh-17219](https://togithub.com/numpy/numpy/pull/17219)) ##### Allow passing optimizations arguments to asv build It is now possible to pass `-j`, `--cpu-baseline`, `--cpu-dispatch` and `--disable-optimization` flags to ASV build when the `--bench-compare` argument is used. ([gh-17284](https://togithub.com/numpy/numpy/pull/17284)) ##### The NVIDIA HPC SDK nvfortran compiler is now supported Support for the nvfortran compiler, a version of pgfortran, has been added. ([gh-17344](https://togithub.com/numpy/numpy/pull/17344)) ##### `dtype` option for `cov` and `corrcoef` The `dtype` option is now available for [numpy.cov]{.title-ref} and [numpy.corrcoef]{.title-ref}. It specifies which data-type the returned result should have. By default the functions still return a [numpy.float64]{.title-ref} result. ([gh-17456](https://togithub.com/numpy/numpy/pull/17456)) ## Improvements ##### Improved string representation for polynomials (`__str__`) The string representation (`__str__`) of all six polynomial types in [numpy.polynomial]{.title-ref} has been updated to give the polynomial as a mathematical expression instead of an array of coefficients. Two package-wide formats for the polynomial expressions are available - one using Unicode characters for superscripts and subscripts, and another using only ASCII characters. ([gh-15666](https://togithub.com/numpy/numpy/pull/15666)) ##### Remove the Accelerate library as a candidate LAPACK library Apple no longer supports Accelerate. Remove it. ([gh-15759](https://togithub.com/numpy/numpy/pull/15759)) ##### Object arrays containing multi-line objects have a more readable `repr` If elements of an object array have a `repr` containing new lines, then the wrapped lines will be aligned by column. 
Notably, this improves the `repr` of nested arrays:

    >>> np.array([np.eye(2), np.eye(3)], dtype=object)
    array([array([[1., 0.],
                  [0., 1.]]), array([[1., 0., 0.],
                                     [0., 1., 0.],
                                     [0., 0., 1.]])], dtype=object)

([gh-15997](https://togithub.com/numpy/numpy/pull/15997))

##### Concatenate supports providing an output dtype

Support was added to `numpy.concatenate` to provide an output `dtype` and `casting` using keyword arguments. The `dtype` argument cannot be provided in conjunction with the `out` one.

([gh-16134](https://togithub.com/numpy/numpy/pull/16134))

##### Thread safe f2py callback functions

Callback functions in f2py are now thread safe.

([gh-16519](https://togithub.com/numpy/numpy/pull/16519))

##### `numpy.core.records.fromfile` now supports file-like objects

`numpy.rec.fromfile` can now use file-like objects, for instance `io.BytesIO`.

([gh-16675](https://togithub.com/numpy/numpy/pull/16675))

##### RPATH support on AIX added to distutils

This allows SciPy to be built on AIX.

([gh-16710](https://togithub.com/numpy/numpy/pull/16710))

##### Use f90 compiler specified by the command line args

The compiler command selection for the Fortran Portland Group Compiler is changed in `numpy.distutils.fcompiler`. This only affects the linking command. This forces the use of the executable provided by the command line option (if provided) instead of the pgfortran executable. If no executable is provided to the command line option it defaults to the pgf90 executable, which is an alias for pgfortran according to the PGI documentation.

([gh-16730](https://togithub.com/numpy/numpy/pull/16730))

##### Add NumPy declarations for Cython 3.0 and later

The pxd declarations for Cython 3.0 were improved to avoid using deprecated NumPy C-API features. Extension modules built with Cython 3.0+ that use NumPy can now set the C macro `NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION` to avoid C compiler warnings about deprecated API usage.

([gh-16986](https://togithub.com/numpy/numpy/pull/16986))

##### Make the window functions exactly symmetric

Make sure the window functions provided by NumPy are symmetric. There were previously small deviations from symmetry due to numerical precision that are now avoided by better arrangement of the computation.

([gh-17195](https://togithub.com/numpy/numpy/pull/17195))

## Performance improvements and changes

##### Enable multi-platform SIMD compiler optimizations

A series of improvements for NumPy infrastructure to pave the way to **NEP-38**, which can be summarized as follows:

- **New Build Arguments**
  - `--cpu-baseline` to specify the minimal set of required optimizations, default value is `min` which provides the minimum CPU features that can safely run on a wide range of users platforms.
  - `--cpu-dispatch` to specify the dispatched set of additional optimizations, default value is `max -xop -fma4` which enables all CPU features, except for AMD legacy features.
  - `--disable-optimization` to explicitly disable the whole new improvements. It also adds a new **C** compiler #definition called `NPY_DISABLE_OPTIMIZATION` which can be used as a guard for any SIMD code.
- **Advanced CPU dispatcher** A flexible cross-architecture CPU dispatcher built on top of Python/NumPy distutils, supporting all common compilers with a wide range of CPU features. The new dispatcher requires a special file extension `*.dispatch.c` to mark the dispatch-able **C** sources.
These sources have the ability to be compiled multiple times so that each compilation process represents certain CPU features and provides different #definitions and flags that affect the code paths.

- **New auto-generated C header ``core/src/common/_cpu_dispatch.h``** This header is generated by the distutils module `ccompiler_opt`, and contains all the #definitions and headers of instruction sets that had been configured through command arguments '--cpu-baseline' and '--cpu-dispatch'.
- **New C header ``core/src/common/npy_cpu_dispatch.h``** This header contains all utilities required for the whole CPU dispatching process; it can also be considered as a bridge linking the new infrastructure work with NumPy CPU runtime detection.
- **Add new attributes to NumPy umath module (Python level)**
  - `__cpu_baseline__` a list containing the minimal set of required optimizations that are supported by the compiler and platform according to the specified values of the command argument '--cpu-baseline'.
  - `__cpu_dispatch__` a list containing the dispatched set of additional optimizations that are supported by the compiler and platform according to the specified values of the command argument '--cpu-dispatch'.
- **Print the supported CPU features during the run of PytestTester**

([gh-13516](https://togithub.com/numpy/numpy/pull/13516))

## Changes

##### Changed behavior of `divmod(1., 0.)` and related functions

The changes also assure that different compiler versions have the same behavior for nan or inf usages in these operations. This was previously compiler dependent; we now force the invalid and divide-by-zero flags, making the results the same across compilers. For example, gcc-5, gcc-8, or gcc-9 now result in the same behavior. The changes are tabulated below:

| Operator | Old Warning | New Warning | Old Result | New Result | Works on MacOS |
| --- | --- | --- | --- | --- | --- |
| np.divmod(1.0, 0.0) | Invalid | Invalid and Dividebyzero | nan, nan | inf, nan | Yes |
| np.fmod(1.0, 0.0) | Invalid | Invalid | nan | nan | No? Yes |
| np.floor_divide(1.0, 0.0) | Invalid | Dividebyzero | nan | inf | Yes |
| np.remainder(1.0, 0.0) | Invalid | Invalid | nan | nan | Yes |

: Summary of New Behavior

([gh-16161](https://togithub.com/numpy/numpy/pull/16161))

##### `np.linspace` on integers now uses floor

When using an `int` dtype in `numpy.linspace`, previously float values would be rounded towards zero. Now `numpy.floor` is used instead, which rounds toward `-inf`. This changes the results for negative values.
For example, the following would previously give: >>> np.linspace(-3, 1, 8, dtype=int) array([-3, -2, -1, -1, 0, 0, 0, 1]) and now results in: >>> np.linspace(-3, 1, 8, dtype=int) array([-3, -3, -2, -2, -1, -1, 0, 1]) The former result can still be obtained with: >>> np.linspace(-3, 1, 8).astype(int) array([-3, -2, -1, -1, 0, 0, 0, 1]) ([gh-16841](https://togithub.com/numpy/numpy/pull/16841)) ## Checksums ##### MD5 6f43f51475706d8346cee9604ed54e8a numpy-1.20.0-cp37-cp37m-macosx_10_9_x86_64.whl c77f563595ab4bab6185c795c573a26a numpy-1.20.0-cp37-cp37m-manylinux1_i686.whl e8f71fdb7e4e837ae79894b621e3ca08 numpy-1.20.0-cp37-cp37m-manylinux1_x86_64.whl 89c477a3eaf2e3379aa21bf80e2a2812 numpy-1.20.0-cp37-cp37m-manylinux2010_i686.whl 82211490e9375bdad57592139b49184d numpy-1.20.0-cp37-cp37m-manylinux2010_x86_64.whl b2d47be4aa123623b39f18723e0d70b7 numpy-1.20.0-cp37-cp37m-manylinux2014_aarch64.whl e884b218dc2b20895f57fae00534e8ea numpy-1.20.0-cp37-cp37m-win32.whl ec8265d429e808d8f92ed46711d66bc7 numpy-1.20.0-cp37-cp37m-win_amd64.whl 791cc5086a755929a1140018067c4587 numpy-1.20.0-cp38-cp38-macosx_10_9_x86_64.whl 2ee146bad9aa521d0bdfd7e30e982a80 numpy-1.20.0-cp38-cp38-manylinux1_i686.whl 83d74204a26e9dd3cb93653818745d09 numpy-1.20.0-cp38-cp38-manylinux1_x86_64.whl 0b0a5e36d4b75a00603cec4db09c44d7 numpy-1.20.0-cp38-cp38-manylinux2010_i686.whl c192aeac728a3abfbd16daef87b2a307 numpy-1.20.0-cp38-cp38-manylinux2010_x86_64.whl 2282da14106cb52bbf9c8c0b847c3480 numpy-1.20.0-cp38-cp38-manylinux2014_aarch64.whl 0e0e4bf53dd8ea4e232083e788419f30 numpy-1.20.0-cp38-cp38-win32.whl 93ebb884970cf7292778cb19e9f27596 numpy-1.20.0-cp38-cp38-win_amd64.whl 749cca75b33849a78e7238aeb09baded numpy-1.20.0-cp39-cp39-macosx_10_9_x86_64.whl e36e7e259bb38ccd2320f88a137115e0 numpy-1.20.0-cp39-cp39-manylinux2010_i686.whl 4979a98a2cf0a1b14a82630b717aa12b numpy-1.20.0-cp39-cp39-manylinux2010_x86_64.whl 52a78d15f15959003047ccb6b66a0ee7 numpy-1.20.0-cp39-cp39-manylinux2014_aarch64.whl 796b273028c7724a855214ae9a83e4f8 numpy-1.20.0-cp39-cp39-win32.whl 663428d8bedc5785041800ce098368cd numpy-1.20.0-cp39-cp39-win_amd64.whl 66ea4e7911de7fdce688c1b69f9c7c54 numpy-1.20.0-pp37-pypy37_pp73-manylinux2010_x86_64.whl fc7c970084438911a50efaa8cddccebc numpy-1.20.0.tar.gz 024eb99dba56c3021458caf86f2fea0a numpy-1.20.0.zip ##### SHA256 89bd70c9ad540febe6c28451ba225eb4e49d27f64728357f512c808002325dfa numpy-1.20.0-cp37-cp37m-macosx_10_9_x86_64.whl 1264c66129f5ef63187649dd43f1ca59532e8c098723643336a85131c0dcce3f numpy-1.20.0-cp37-cp37m-manylinux1_i686.whl e9c5fd330d2fedf06051bafb996252de9b032fcb2ec03eefc9a543e56efa66d4 numpy-1.20.0-cp37-cp37m-manylinux1_x86_64.whl db5e69d08756a2fa75a42b4e433880b6187768fe1bc73d21819def893e5128c6 numpy-1.20.0-cp37-cp37m-manylinux2010_i686.whl 1abc02e30e3efd81a4571e00f8e62bf42e343c76698e0a3e11d9c2b3ee0d77a7 numpy-1.20.0-cp37-cp37m-manylinux2010_x86_64.whl 5ae765dd29c71a555f8102281f6fb15a3f4dbd35f6e7daf36af9df6d9dd716a5 numpy-1.20.0-cp37-cp37m-manylinux2014_aarch64.whl b51b9ef0624f4b01b846c981034c10d2e30db33f9f8be71e992f3900741f6f77 numpy-1.20.0-cp37-cp37m-win32.whl afeee581b50df20ef07b736e62ca612858f1fcdba96651d26ab44e3d567a4e6e numpy-1.20.0-cp37-cp37m-win_amd64.whl 2bf0e68c92ef077fe766e53f8937d8ac341bdbca68ec128ae049b7d5c34e3206 numpy-1.20.0-cp38-cp38-macosx_10_9_x86_64.whl 2445a96fbae23a4109c61be0f0af0f3bc273905dc5687a710850c1dfde0fc994 numpy-1.20.0-cp38-cp38-manylinux1_i686.whl 33edfc0eb229f86f539493917b34035054313a11afbed48404aaf9f86bf4b0f6 numpy-1.20.0-cp38-cp38-manylinux1_x86_64.whl 
894aaee60043a98b03f0ad992c810f62e3a15f98a701e1c0f58a4f4a0df13429 numpy-1.20.0-cp38-cp38-manylinux2010_i686.whl
b66a6c15d793eda7cdad986e737775aa31b9306d588c14dd0277d2dda5546150 numpy-1.20.0-cp38-cp38-manylinux2010_x86_64.whl
eee454d3aa3955d0c0069a0f265fea47f1e1384c35a110a95efed358eb6e1562 numpy-1.20.0-cp38-cp38-manylinux2014_aarch64.whl
abdfa075e293d73638ece434708aa60b510dc6e70d805f57f481a0f550b25a9e numpy-1.20.0-cp38-cp38-win32.whl
f1e9424e9aa3834ea27cc12f9c6ea8ace5da18ee60a720bb3a85b2f733f41782 numpy-1.20.0-cp38-cp38-win_amd64.whl
cb257bb0c0a3176c32782a63cfab2eace7eabfa2a3b2dfd85a13700617ccaf28 numpy-1.20.0-cp39-cp39-macosx_10_9_x86_64.whl
cf5d9dcbdbe523fa665c5309cce5f144648d94a7fddbf5a40f8e0d5c9f5b596d numpy-1.20.0-cp39-cp39-manylinux2010_i686.whl
93c2abea7bb69f47029b84ceac30ab46dfcfdb99b671ad850a333ff794a765e4 numpy-1.20.0-cp39-cp39-manylinux2010_x86_64.whl
0d28a54afcf46f1f9ebd163e49ad6b49087f22986fefd01a23ca0c1cdda25ca6 numpy-1.20.0-cp39-cp39-manylinux2014_aarch64.whl
d1bc331e1706fd1809a1bc8a31205329e5b30cf5ba50461c624da267e99f6ae6 numpy-1.20.0-cp39-cp39-win32.whl
e3db646af9f6a145f0c57202f4b55d4a33f975e395e78fb7b394644c17c1a3a6 numpy-1.20.0-cp39-cp39-win_amd64.whl
4d592264d2a4f368afbb4288b5ceb646d4cbaf559c0249c096fbb0a149806b90 numpy-1.20.0-pp37-pypy37_pp73-manylinux2010_x86_64.whl
67b630745a71b541ff6517d6f3d62b00690dc8ba0684cad0d7b0ac55aec1de53 numpy-1.20.0.tar.gz
3d8233c03f116d068d5365fed4477f2947c7229582dad81e5953088989294cec numpy-1.20.0.zip
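Two of the 1.20.0 items above lend themselves to a quick interactive check: the new CPU-feature attributes on the umath module, and the `divmod`-related results tabulated under "Changes". A minimal sketch, assuming NumPy 1.20.0 with default floating-point error handling; the `numpy.core._multiarray_umath` location of the attributes is an assumption (the notes only say "umath module"), and the expected values in the comments are taken from the table above:

```python
import warnings

import numpy as np
from numpy.core import _multiarray_umath  # assumed location of the umath module

# New attributes described above: baseline vs. dispatched optimizations.
print(_multiarray_umath.__cpu_baseline__)
print(_multiarray_umath.__cpu_dispatch__)

# Divmod-related results from the behavior table; the forced invalid and
# divide-by-zero flags surface as RuntimeWarnings under default settings.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(np.divmod(1.0, 0.0))        # (inf, nan)
    print(np.fmod(1.0, 0.0))          # nan
    print(np.floor_divide(1.0, 0.0))  # inf
    print(np.remainder(1.0, 0.0))     # nan

for w in caught:
    print(w.category.__name__, w.message)
```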
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Renovate will not automatically rebase this PR, because other commits have been found. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/classify_text/requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 1bcdb3e49eed..efad4a9f1fae 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,2 +1,3 @@ google-cloud-language==2.0.0 -numpy==1.19.5 \ No newline at end of file +numpy==1.20.1; python_version > 3.6 +numpy==1.19.5; python_version <= 3.6 From 1c0612eefaea3bfc83c1d33431fd61d7e7664ae4 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 20 Feb 2021 07:10:04 +0100 Subject: [PATCH 204/323] chore(deps): update dependency google-auth to v1.27.0 (#76) [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | `==1.26.1` -> `==1.27.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-auth/1.27.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-auth/1.27.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-auth/1.27.0/compatibility-slim/1.26.1)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-auth/1.27.0/confidence-slim/1.26.1)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
googleapis/google-auth-library-python

### [`v1.27.0`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#1270-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1261v1270-2021-02-16)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.26.1...v1.27.0)

##### Features

- workload identity federation support ([#698](https://www.github.com/googleapis/google-auth-library-python/issues/698)) ([d4d7f38](https://www.github.com/googleapis/google-auth-library-python/commit/d4d7f3815e0cea3c9f39a5204a4f001de99568e9))

##### Bug Fixes

- add pyopenssl as extra dependency ([#697](https://www.github.com/googleapis/google-auth-library-python/issues/697)) ([aeab5d0](https://www.github.com/googleapis/google-auth-library-python/commit/aeab5d07c5538f3d8cce817df24199534572b97d))

##### [1.26.1](https://www.github.com/googleapis/google-auth-library-python/compare/v1.26.0...v1.26.1) (2021-02-11)

##### Documentation

- fix a typo in the user guide (avaiable -> available) ([#680](https://www.github.com/googleapis/google-auth-library-python/issues/680)) ([684457a](https://www.github.com/googleapis/google-auth-library-python/commit/684457afd3f81892e12d983a61672d7ea9bbe296))

##### Bug Fixes

- revert workload identity federation support ([#691](https://togithub.com/googleapis/google-auth-library-python/pull/691))
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/api/requirements.txt | 2 +- language/snippets/classify_text/requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 575e9508dec7..98df063bf970 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.12.8 -google-auth==1.26.1 +google-auth==1.27.0 google-auth-httplib2==0.0.4 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index efad4a9f1fae..328dc7a517b7 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ google-cloud-language==2.0.0 -numpy==1.20.1; python_version > 3.6 -numpy==1.19.5; python_version <= 3.6 +numpy==1.20.1; python_version > '3.6' +numpy==1.19.5; python_version <= '3.6' From ee7677e94e23c88284a66e0e920575009ce60ecb Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 1 Apr 2021 12:52:02 -0700 Subject: [PATCH 205/323] fix: use correct retry deadlines (#83) This PR was generated using Autosynth. :rainbow: Synth log will be available here: https://source.cloud.google.com/results/invocations/d34b36d3-1026-41c2-83ac-a0e5b396b48c/targets - [ ] To automatically regenerate this PR, check this box. (May take up to 24 hours.) 
PiperOrigin-RevId: 364411656 Source-Link: https://github.com/googleapis/googleapis/commit/149a3a84c29c9b8189576c7442ccb6dcf6a8f95b PiperOrigin-RevId: 361662015 Source-Link: https://github.com/googleapis/googleapis/commit/28a591963253d52ce3a25a918cafbdd9928de8cf PiperOrigin-RevId: 361217394 Source-Link: https://github.com/googleapis/googleapis/commit/3b0afe54b5aedcd7cee0036b16d2a31324d0db60 PiperOrigin-RevId: 359580699 Source-Link: https://github.com/googleapis/googleapis/commit/d9b32e92fa57c37e5af0dc03badfe741170c5849 PiperOrigin-RevId: 359562873 Source-Link: https://github.com/googleapis/googleapis/commit/07932bb995e7dc91b43620ea8402c6668c7d102c PiperOrigin-RevId: 354996675 Source-Link: https://github.com/googleapis/googleapis/commit/20712b8fe95001b312f62c6c5f33e3e3ec92cfaf PiperOrigin-RevId: 352816749 Source-Link: https://github.com/googleapis/googleapis/commit/ceaaf31b3d13badab7cf9d3b570f5639db5593d9 --- language/snippets/api/noxfile.py | 19 ++++++++++--------- language/snippets/classify_text/noxfile.py | 19 ++++++++++--------- language/snippets/cloud-client/v1/noxfile.py | 19 ++++++++++--------- .../snippets/generated-samples/v1/noxfile.py | 19 ++++++++++--------- language/snippets/sentiment/noxfile.py | 19 ++++++++++--------- 5 files changed, 50 insertions(+), 45 deletions(-) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index b90eef00f2d9..97bf7da80e39 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -68,7 +69,7 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} @@ -84,7 +85,7 @@ def get_pytest_env_vars(): # DO NOT EDIT - automatically generated. # All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] @@ -97,7 +98,7 @@ def get_pytest_env_vars(): # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". 
This is used when running the linter to insure that import order is @@ -135,7 +136,7 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): +def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG['enforce_type_hints']: session.install("flake8", "flake8-import-order") else: @@ -154,7 +155,7 @@ def lint(session): @nox.session -def blacken(session): +def blacken(session: nox.sessions.Session) -> None: session.install("black") python_files = [path for path in os.listdir(".") if path.endswith(".py")] @@ -168,7 +169,7 @@ def blacken(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): session.install("-r", "requirements.txt") @@ -194,7 +195,7 @@ def _session_tests(session, post_install=None): @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) @@ -209,7 +210,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) @@ -232,7 +233,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index b90eef00f2d9..97bf7da80e39 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -68,7 +69,7 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} @@ -84,7 +85,7 @@ def get_pytest_env_vars(): # DO NOT EDIT - automatically generated. # All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] @@ -97,7 +98,7 @@ def get_pytest_env_vars(): # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". 
This is used when running the linter to insure that import order is @@ -135,7 +136,7 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): +def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG['enforce_type_hints']: session.install("flake8", "flake8-import-order") else: @@ -154,7 +155,7 @@ def lint(session): @nox.session -def blacken(session): +def blacken(session: nox.sessions.Session) -> None: session.install("black") python_files = [path for path in os.listdir(".") if path.endswith(".py")] @@ -168,7 +169,7 @@ def blacken(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): session.install("-r", "requirements.txt") @@ -194,7 +195,7 @@ def _session_tests(session, post_install=None): @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) @@ -209,7 +210,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) @@ -232,7 +233,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index b90eef00f2d9..97bf7da80e39 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -68,7 +69,7 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} @@ -84,7 +85,7 @@ def get_pytest_env_vars(): # DO NOT EDIT - automatically generated. # All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] @@ -97,7 +98,7 @@ def get_pytest_env_vars(): # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". 
This is used when running the linter to insure that import order is @@ -135,7 +136,7 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): +def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG['enforce_type_hints']: session.install("flake8", "flake8-import-order") else: @@ -154,7 +155,7 @@ def lint(session): @nox.session -def blacken(session): +def blacken(session: nox.sessions.Session) -> None: session.install("black") python_files = [path for path in os.listdir(".") if path.endswith(".py")] @@ -168,7 +169,7 @@ def blacken(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): session.install("-r", "requirements.txt") @@ -194,7 +195,7 @@ def _session_tests(session, post_install=None): @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) @@ -209,7 +210,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) @@ -232,7 +233,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index b90eef00f2d9..97bf7da80e39 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -68,7 +69,7 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} @@ -84,7 +85,7 @@ def get_pytest_env_vars(): # DO NOT EDIT - automatically generated. # All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] @@ -97,7 +98,7 @@ def get_pytest_env_vars(): # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". 
This is used when running the linter to insure that import order is @@ -135,7 +136,7 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): +def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG['enforce_type_hints']: session.install("flake8", "flake8-import-order") else: @@ -154,7 +155,7 @@ def lint(session): @nox.session -def blacken(session): +def blacken(session: nox.sessions.Session) -> None: session.install("black") python_files = [path for path in os.listdir(".") if path.endswith(".py")] @@ -168,7 +169,7 @@ def blacken(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): session.install("-r", "requirements.txt") @@ -194,7 +195,7 @@ def _session_tests(session, post_install=None): @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) @@ -209,7 +210,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) @@ -232,7 +233,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index b90eef00f2d9..97bf7da80e39 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -68,7 +69,7 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} @@ -84,7 +85,7 @@ def get_pytest_env_vars(): # DO NOT EDIT - automatically generated. # All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] @@ -97,7 +98,7 @@ def get_pytest_env_vars(): # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". 
This is used when running the linter to insure that import order is @@ -135,7 +136,7 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): +def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG['enforce_type_hints']: session.install("flake8", "flake8-import-order") else: @@ -154,7 +155,7 @@ def lint(session): @nox.session -def blacken(session): +def blacken(session: nox.sessions.Session) -> None: session.install("black") python_files = [path for path in os.listdir(".") if path.endswith(".py")] @@ -168,7 +169,7 @@ def blacken(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): session.install("-r", "requirements.txt") @@ -194,7 +195,7 @@ def _session_tests(session, post_install=None): @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) @@ -209,7 +210,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) @@ -232,7 +233,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) From 17b59f6f8261ce557bab227343f1a40afbecdf8f Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 16 Apr 2021 18:46:02 +0000 Subject: [PATCH 206/323] build: update .OwlBot.lock with new version of post-processor (#95) This PR updates the docker container used for OwlBot. This container performs post-processing tasks when pull-requests are opened on your repository, such as: * copying generated files into place. * generating common files from templates. Version sha256:c0deb0984dd1c56fa04aaf6974f23f4fe674d80f4329310c3f52cd46c40b7419 was published at 2021-04-16T11:10:40.754Z. 
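The noxfile diffs that follow extend `_session_tests` so that pip constraints files are honored when present. As a hedged sketch of the same pattern in isolation (the `tests` session name and the `session.run("pytest")` line are illustrative, not part of the diff):

```python
import os

import nox


@nox.session
def tests(session: nox.sessions.Session) -> None:
    # Prefer a constraints file when one exists: -c pins the versions pip
    # may resolve without adding any new requirements of its own.
    if os.path.exists("requirements.txt"):
        if os.path.exists("constraints.txt"):
            session.install("-r", "requirements.txt", "-c", "constraints.txt")
        else:
            session.install("-r", "requirements.txt")
    session.run("pytest")
```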
--- language/snippets/api/noxfile.py | 10 ++++++++-- language/snippets/classify_text/noxfile.py | 10 ++++++++-- language/snippets/cloud-client/v1/noxfile.py | 10 ++++++++-- language/snippets/generated-samples/v1/noxfile.py | 10 ++++++++-- language/snippets/sentiment/noxfile.py | 10 ++++++++-- 5 files changed, 40 insertions(+), 10 deletions(-) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index 97bf7da80e39..956cdf4f9250 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -172,10 +172,16 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index 97bf7da80e39..956cdf4f9250 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -172,10 +172,16 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index 97bf7da80e39..956cdf4f9250 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -172,10 +172,16 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index 
97bf7da80e39..956cdf4f9250 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -172,10 +172,16 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index 97bf7da80e39..956cdf4f9250 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -172,10 +172,16 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) From 69d77ce109f07ae9fd11931aa4f7c4a0adcfae33 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 20 May 2021 19:38:02 +0200 Subject: [PATCH 207/323] chore(deps): update dependency google-auth-httplib2 to v0.1.0 (#80) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [google-auth-httplib2](https://togithub.com/GoogleCloudPlatform/google-auth-library-python-httplib2) | `==0.0.4` -> `==0.1.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-auth-httplib2/0.1.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-auth-httplib2/0.1.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-auth-httplib2/0.1.0/compatibility-slim/0.0.4)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-auth-httplib2/0.1.0/confidence-slim/0.0.4)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
GoogleCloudPlatform/google-auth-library-python-httplib2

### [`v0.1.0`](https://togithub.com/GoogleCloudPlatform/google-auth-library-python-httplib2/blob/master/CHANGELOG.md#010-httpswwwgithubcomgoogleapisgoogle-auth-library-python-httplib2comparev003v010-2021-03-01)

[Compare Source](https://togithub.com/GoogleCloudPlatform/google-auth-library-python-httplib2/compare/v0.0.4...v0.1.0)

##### Features

- add close method ([#14](https://www.github.com/googleapis/google-auth-library-python-httplib2/issues/14)) ([feda187](https://www.github.com/googleapis/google-auth-library-python-httplib2/commit/feda187133beeb656fdd7f30ed124ed1e428a74a))
- expose a few httplib2 properties and a method ([#9](https://www.github.com/googleapis/google-auth-library-python-httplib2/issues/9)) ([e3aa44e](https://www.github.com/googleapis/google-auth-library-python-httplib2/commit/e3aa44e01e2987989671467c7a022ea33829eb2f))
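A minimal sketch of the 0.1.0 addition noted above, the new `close()` method on `AuthorizedHttp`. It assumes application default credentials are available locally; the discovery URL is just an illustrative public endpoint:

```python
import google.auth
import google_auth_httplib2

# Application default credentials; any credentials object works here.
credentials, _ = google.auth.default()
authed_http = google_auth_httplib2.AuthorizedHttp(credentials)
try:
    # Request like plain httplib2: returns (response, content).
    response, content = authed_http.request(
        "https://www.googleapis.com/discovery/v1/apis"
    )
    print(response.status, len(content))
finally:
    authed_http.close()  # new in 0.1.0 per the changelog above
```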
--- ### Configuration 📅 **Schedule**: At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻️ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box. --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 98df063bf970..4574239f9eab 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.12.8 google-auth==1.27.0 -google-auth-httplib2==0.0.4 +google-auth-httplib2==0.1.0 From df9369f111bcff52e58d6f7805e0a220485c8c0e Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 20 May 2021 19:40:02 +0200 Subject: [PATCH 208/323] chore(deps): update dependency pytest to v6.2.4 (#97) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [pytest](https://docs.pytest.org/en/latest/) ([source](https://togithub.com/pytest-dev/pytest), [changelog](https://docs.pytest.org/en/stable/changelog.html)) | `==6.0.1` -> `==6.2.4` | [![age](https://badges.renovateapi.com/packages/pypi/pytest/6.2.4/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/pytest/6.2.4/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/pytest/6.2.4/compatibility-slim/6.0.1)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/pytest/6.2.4/confidence-slim/6.0.1)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
pytest-dev/pytest

### [`v6.2.4`](https://togithub.com/pytest-dev/pytest/releases/6.2.4) [Compare Source](https://togithub.com/pytest-dev/pytest/compare/6.2.3...6.2.4)

# pytest 6.2.4 (2021-05-04)

## Bug Fixes

- [#8539](https://togithub.com/pytest-dev/pytest/issues/8539): Fixed assertion rewriting on Python 3.10.

### [`v6.2.3`](https://togithub.com/pytest-dev/pytest/releases/6.2.3) [Compare Source](https://togithub.com/pytest-dev/pytest/compare/6.2.2...6.2.3)

# pytest 6.2.3 (2021-04-03)

## Bug Fixes

- [#8414](https://togithub.com/pytest-dev/pytest/issues/8414): pytest used to create directories under `/tmp` with world-readable permissions. This means that any user in the system was able to read information written by tests in temporary directories (such as those created by the `tmp_path`/`tmpdir` fixture). Now the directories are created with private permissions.

  pytest also used to silently use a pre-existing `/tmp/pytest-of-` directory, even if owned by another user. This means another user could pre-create such a directory and gain control of another user's temporary directory. Now such a condition results in an error.

### [`v6.2.2`](https://togithub.com/pytest-dev/pytest/releases/6.2.2) [Compare Source](https://togithub.com/pytest-dev/pytest/compare/6.2.1...6.2.2)

# pytest 6.2.2 (2021-01-25)

## Bug Fixes

- [#8152](https://togithub.com/pytest-dev/pytest/issues/8152): Fixed "(<Skipped instance>)" being shown as a skip reason in the verbose test summary line when the reason is empty.
- [#8249](https://togithub.com/pytest-dev/pytest/issues/8249): Fix the `faulthandler` plugin for occasions when running with `twisted.logger` and using `pytest --capture=no`.

### [`v6.2.1`](https://togithub.com/pytest-dev/pytest/releases/6.2.1) [Compare Source](https://togithub.com/pytest-dev/pytest/compare/6.2.0...6.2.1)

# pytest 6.2.1 (2020-12-15)

## Bug Fixes

- [#7678](https://togithub.com/pytest-dev/pytest/issues/7678): Fixed bug where `ImportPathMismatchError` would be raised for files compiled in the host and loaded later from an UNC mounted path (Windows).
- [#8132](https://togithub.com/pytest-dev/pytest/issues/8132): Fixed regression in `approx`: in 6.2.0 `approx` no longer raises `TypeError` when dealing with non-numeric types, falling back to normal comparison. Before 6.2.0, array types like tf.DeviceArray fell through to the scalar case, and happened to compare correctly to a scalar if they had only one element. After 6.2.0, these types began failing, because they inherited neither from standard Python number hierarchy nor from `numpy.ndarray`. `approx` now converts arguments to `numpy.ndarray` if they expose the array protocol and are not scalars. This treats array-like objects like numpy arrays, regardless of size.

### [`v6.2.0`](https://togithub.com/pytest-dev/pytest/releases/6.2.0) [Compare Source](https://togithub.com/pytest-dev/pytest/compare/6.1.2...6.2.0)

# pytest 6.2.0 (2020-12-12)

## Breaking Changes

- [#7808](https://togithub.com/pytest-dev/pytest/issues/7808): pytest now supports python3.6+ only.
## Deprecations

- [#7469](https://togithub.com/pytest-dev/pytest/issues/7469): Directly constructing/calling the following classes/functions is now deprecated:
  - `_pytest.cacheprovider.Cache`
  - `_pytest.cacheprovider.Cache.for_config()`
  - `_pytest.cacheprovider.Cache.clear_cache()`
  - `_pytest.cacheprovider.Cache.cache_dir_from_config()`
  - `_pytest.capture.CaptureFixture`
  - `_pytest.fixtures.FixtureRequest`
  - `_pytest.fixtures.SubRequest`
  - `_pytest.logging.LogCaptureFixture`
  - `_pytest.pytester.Pytester`
  - `_pytest.pytester.Testdir`
  - `_pytest.recwarn.WarningsRecorder`
  - `_pytest.recwarn.WarningsChecker`
  - `_pytest.tmpdir.TempPathFactory`
  - `_pytest.tmpdir.TempdirFactory`

  These have always been considered private, but now issue a deprecation warning, which may become a hard error in pytest 7.0.0.

- [#7530](https://togithub.com/pytest-dev/pytest/issues/7530): The `--strict` command-line option has been deprecated; use `--strict-markers` instead.

  We may reintroduce `--strict` in the future as an encompassing flag for all strictness-related options (`--strict-markers` and `--strict-config` at the moment; more might be introduced in the future).

- [#7988](https://togithub.com/pytest-dev/pytest/issues/7988): The `@pytest.yield_fixture` decorator/function is now deprecated. Use `pytest.fixture` instead. `yield_fixture` has been an alias for `fixture` for a very long time, so it can be search/replaced safely.

## Features

- [#5299](https://togithub.com/pytest-dev/pytest/issues/5299): pytest now warns about unraisable exceptions and unhandled thread exceptions that occur in tests on Python >= 3.8. See the unraisable documentation for more information.

- [#7425](https://togithub.com/pytest-dev/pytest/issues/7425): New `pytester` fixture, which is identical to `testdir` but its methods return `pathlib.Path` when appropriate instead of `py.path.local`. This is part of the movement to use `pathlib.Path` objects internally, in order to remove the dependency on `py` in the future. Internally, the old `Testdir` (`_pytest.pytester.Testdir`) is now a thin wrapper around `Pytester` (`_pytest.pytester.Pytester`), preserving the old interface.

- [#7695](https://togithub.com/pytest-dev/pytest/issues/7695): A new hook was added, `pytest_markeval_namespace`, which should return a dictionary. This dictionary will be used to augment the "global" variables available to evaluate skipif/xfail/xpass markers.

  Pseudo example. `conftest.py`:

  ```python
  def pytest_markeval_namespace():
      return {"color": "red"}
  ```

  `test_func.py`:

  ```python
  @pytest.mark.skipif("color == 'blue'", reason="Color is not red")
  def test_func():
      assert False
  ```

- [#8006](https://togithub.com/pytest-dev/pytest/issues/8006): It is now possible to construct a `pytest.MonkeyPatch` object directly as `pytest.MonkeyPatch()`, in cases when the `monkeypatch` fixture cannot be used. Previously some users imported it from the private `_pytest.monkeypatch.MonkeyPatch` namespace.

  Additionally, `MonkeyPatch.context` is now a classmethod, and can be used as `with MonkeyPatch.context() as mp: ...`. This is the recommended way to use `MonkeyPatch` directly, since unlike the `monkeypatch` fixture, an instance created directly is not `undo()`-ed automatically. (A short sketch appears after these release notes.)

## Improvements

- [#1265](https://togithub.com/pytest-dev/pytest/issues/1265): Added an `__str__` implementation to the `LineMatcher` (`_pytest.pytester.LineMatcher`) class, which is returned from `pytester.run_pytest().stdout` and similar.
  It returns the entire output, like the existing `str()` method.

- [#2044](https://togithub.com/pytest-dev/pytest/issues/2044): Verbose mode now shows the reason that a test was skipped in the test's terminal line after the "SKIPPED", "XFAIL" or "XPASS".

- [#7469](https://togithub.com/pytest-dev/pytest/issues/7469): The types of builtin pytest fixtures are now exported so they may be used in type annotations of test functions. The newly-exported types are:
  - `pytest.FixtureRequest` for the request fixture.
  - `pytest.Cache` for the cache fixture.
  - `pytest.CaptureFixture[str]` for the capfd and capsys fixtures.
  - `pytest.CaptureFixture[bytes]` for the capfdbinary and capsysbinary fixtures.
  - `pytest.LogCaptureFixture` for the caplog fixture.
  - `pytest.Pytester` for the pytester fixture.
  - `pytest.Testdir` for the testdir fixture.
  - `pytest.TempdirFactory` for the tmpdir_factory fixture.
  - `pytest.TempPathFactory` for the tmp_path_factory fixture.
  - `pytest.MonkeyPatch` for the monkeypatch fixture.
  - `pytest.WarningsRecorder` for the recwarn fixture.

  Constructing them is not supported (except for `MonkeyPatch`); they are only meant for use in type annotations. Doing so will emit a deprecation warning, and may become a hard error in pytest 7.0.

  Subclassing them is also not supported. This is not currently enforced at runtime, but is detected by type-checkers such as mypy.

- [#7527](https://togithub.com/pytest-dev/pytest/issues/7527): When a comparison between `namedtuple` (`collections.namedtuple`) instances of the same type fails, pytest now shows the differing field names (possibly nested) instead of their indexes.

- [#7615](https://togithub.com/pytest-dev/pytest/issues/7615): `Node.warn` (`_pytest.nodes.Node.warn`) now permits any subclass of `Warning`, not just `PytestWarning` (`pytest.PytestWarning`).

- [#7701](https://togithub.com/pytest-dev/pytest/issues/7701): Improved reporting when using `--collect-only`. It will now show the number of collected tests in the summary stats.

- [#7710](https://togithub.com/pytest-dev/pytest/issues/7710): Use strict equality comparison for non-numeric types in `pytest.approx` instead of raising `TypeError`. This was the undocumented behavior before 3.7, but is now officially a supported feature.

- [#7938](https://togithub.com/pytest-dev/pytest/issues/7938): New `--sw-skip` argument, which is a shorthand for `--stepwise-skip`.

- [#8023](https://togithub.com/pytest-dev/pytest/issues/8023): Added `'node_modules'` to the default value for norecursedirs.

- [#8032](https://togithub.com/pytest-dev/pytest/issues/8032): `doClassCleanups` (`unittest.TestCase.doClassCleanups`, introduced in unittest in Python 3.8) is now called appropriately.

## Bug Fixes

- [#4824](https://togithub.com/pytest-dev/pytest/issues/4824): Fixed quadratic behavior and improved performance of collection of items using autouse fixtures and xunit fixtures.

- [#7758](https://togithub.com/pytest-dev/pytest/issues/7758): Fixed an issue where some files in packages are getting lost from `--lf` even though they contain tests that failed. Regressed in pytest 5.4.0.

- [#7911](https://togithub.com/pytest-dev/pytest/issues/7911): Directories created by tmp_path and tmpdir are now considered stale after 3 days without modification (previous value was 3 hours) to avoid deleting directories still in use in long running test suites.

- [#7913](https://togithub.com/pytest-dev/pytest/issues/7913): Fixed a crash or hang in `pytester.spawn` (`_pytest.pytester.Pytester.spawn`) when the readline module is involved.
- [#7951](https://togithub.com/pytest-dev/pytest/issues/7951): Fixed handling of recursive symlinks when collecting tests.
- [#7981](https://togithub.com/pytest-dev/pytest/issues/7981): Fixed symlinked directories not being followed during collection. Regressed in pytest 6.1.0.
- [#8016](https://togithub.com/pytest-dev/pytest/issues/8016): Fixed only one doctest being collected when using `pytest --doctest-modules path/to/an/__init__.py`.

## Improved Documentation

- [#7429](https://togithub.com/pytest-dev/pytest/issues/7429): Add more information and use cases about skipping doctests.
- [#7780](https://togithub.com/pytest-dev/pytest/issues/7780): Classes which should not be inherited from are now marked `final class` in the API reference.
- [#7872](https://togithub.com/pytest-dev/pytest/issues/7872): `_pytest.config.argparsing.Parser.addini()` accepts explicit `None` and `"string"`.
- [#7878](https://togithub.com/pytest-dev/pytest/issues/7878): In the pull request section, ask to commit after editing the changelog and authors file.

## Trivial/Internal Changes

- [#7802](https://togithub.com/pytest-dev/pytest/issues/7802): The `attrs` dependency requirement is now >=19.2.0 instead of >=17.4.0.
- [#8014](https://togithub.com/pytest-dev/pytest/issues/8014): `.pyc` files created by pytest's assertion rewriting now conform to the newer PEP-552 format on Python >= 3.7. (These files are internal and only interpreted by pytest itself.)

### [`v6.1.2`](https://togithub.com/pytest-dev/pytest/releases/6.1.2) [Compare Source](https://togithub.com/pytest-dev/pytest/compare/6.1.1...6.1.2)

# pytest 6.1.2 (2020-10-28)

## Bug Fixes

- [#7758](https://togithub.com/pytest-dev/pytest/issues/7758): Fixed an issue where some files in packages are getting lost from `--lf` even though they contain tests that failed. Regressed in pytest 5.4.0.
- [#7911](https://togithub.com/pytest-dev/pytest/issues/7911): Directories created by tmpdir are now considered stale after 3 days without modification (previous value was 3 hours) to avoid deleting directories still in use in long running test suites.

## Improved Documentation

- [#7815](https://togithub.com/pytest-dev/pytest/issues/7815): Improve deprecation warning message for `pytest._fillfuncargs()`.

### [`v6.1.1`](https://togithub.com/pytest-dev/pytest/releases/6.1.1) [Compare Source](https://togithub.com/pytest-dev/pytest/compare/6.1.0...6.1.1)

# pytest 6.1.1 (2020-10-03)

## Bug Fixes

- [#7807](https://togithub.com/pytest-dev/pytest/issues/7807): Fixed regression in pytest 6.1.0 causing incorrect rootdir to be determined in some non-trivial cases where parent directories have config files as well.
- [#7814](https://togithub.com/pytest-dev/pytest/issues/7814): Fixed crash in header reporting when testpaths is used and contains absolute paths (regression in 6.1.0).

### [`v6.1.0`](https://togithub.com/pytest-dev/pytest/releases/6.1.0) [Compare Source](https://togithub.com/pytest-dev/pytest/compare/6.0.2...6.1.0)

# pytest 6.1.0 (2020-09-26)

## Breaking Changes

- [#5585](https://togithub.com/pytest-dev/pytest/issues/5585): As per our policy, the following features which have been deprecated in the 5.X series are now removed:
  - The `funcargnames` read-only property of `FixtureRequest`, `Metafunc`, and `Function` classes. Use the `fixturenames` attribute.
  - `@pytest.fixture` no longer supports positional arguments; pass all arguments by keyword instead.
  - Direct construction of `Node` subclasses now raises an error; use `from_parent` instead.
  - The default value for `junit_family` has changed to `xunit2`. If you require the old format, add `junit_family=xunit1` to your configuration file.
  - The `TerminalReporter` no longer has a `writer` attribute. Plugin authors may use the public functions of the `TerminalReporter` instead of accessing the `TerminalWriter` object directly.
  - The `--result-log` option has been removed. Users are recommended to use the [pytest-reportlog](https://togithub.com/pytest-dev/pytest-reportlog) plugin instead.

  For more information consult [Deprecations and Removals](https://docs.pytest.org/en/stable/deprecations.html) in the docs.

## Deprecations

- [#6981](https://togithub.com/pytest-dev/pytest/issues/6981): The `pytest.collect` module is deprecated: all its names can be imported from `pytest` directly.

- [#7097](https://togithub.com/pytest-dev/pytest/issues/7097): The `pytest._fillfuncargs` function is deprecated. This function was kept for backward compatibility with an older plugin. Its functionality is not meant to be used directly, but if you must replace it, use `function._request._fillfixtures()` instead, though note this is not a public API and may break in the future.

- [#7210](https://togithub.com/pytest-dev/pytest/issues/7210): The special `-k '-expr'` syntax to `-k` is deprecated. Use `-k 'not expr'` instead.

  The special `-k 'expr:'` syntax to `-k` is deprecated. Please open an issue if you use this and want a replacement.

- [#7255](https://togithub.com/pytest-dev/pytest/issues/7255): The `pytest_warning_captured` (`_pytest.hookspec.pytest_warning_captured`) hook is deprecated in favor of `pytest_warning_recorded` (`_pytest.hookspec.pytest_warning_recorded`), and will be removed in a future version.

- [#7648](https://togithub.com/pytest-dev/pytest/issues/7648): The `gethookproxy()` and `isinitpath()` methods of `FSCollector` and `Package` are deprecated; use `self.session.gethookproxy()` and `self.session.isinitpath()` instead. This should work on all pytest versions.

## Features

- [#7667](https://togithub.com/pytest-dev/pytest/issues/7667): New `--durations-min` command-line flag controls the minimal duration for inclusion in the slowest list of tests shown by `--durations`. Previously this was hard-coded to `0.005s`.

## Improvements

- [#6681](https://togithub.com/pytest-dev/pytest/issues/6681): Internal pytest warnings issued during the early stages of initialization are now properly handled and can be filtered through filterwarnings or `--pythonwarnings/-W`. This also fixes a number of long standing issues: [#2891](https://togithub.com/pytest-dev/pytest/issues/2891), [#7620](https://togithub.com/pytest-dev/pytest/issues/7620), [#7426](https://togithub.com/pytest-dev/pytest/issues/7426).

- [#7572](https://togithub.com/pytest-dev/pytest/issues/7572): When a plugin listed in `required_plugins` is missing or an unknown config key is used with `--strict-config`, a simple error message is now shown instead of a stacktrace.

- [#7685](https://togithub.com/pytest-dev/pytest/issues/7685): Added two new attributes, `rootpath` (`_pytest.config.Config.rootpath`) and `inipath` (`_pytest.config.Config.inipath`), to `Config` (`_pytest.config.Config`). These attributes are `pathlib.Path` versions of the existing `rootdir` (`_pytest.config.Config.rootdir`) and `inifile` (`_pytest.config.Config.inifile`) attributes, and should be preferred over them when possible.
- [#7780](https://togithub.com/pytest-dev/pytest/issues/7780): Public classes which are not designed to be inherited from are now marked [@final](https://docs.python.org/3/library/typing.html#typing.final). Code which inherits from these classes will trigger a type-checking (e.g. mypy) error, but will still work at runtime. Currently the `final` designation does not appear in the API Reference, but hopefully will in the future.

## Bug Fixes

- [#1953](https://togithub.com/pytest-dev/pytest/issues/1953): Fixed error when overwriting a parametrized fixture while also reusing the super fixture value.

  ```python
  # conftest.py
  import pytest

  @pytest.fixture(params=[1, 2])
  def foo(request):
      return request.param

  # test_foo.py
  import pytest

  @pytest.fixture
  def foo(foo):
      return foo * 2
  ```

- [#4984](https://togithub.com/pytest-dev/pytest/issues/4984): Fixed an internal error crash with `IndexError: list index out of range` when collecting a module which starts with a decorated function, the decorator raises, and assertion rewriting is enabled.

- [#7591](https://togithub.com/pytest-dev/pytest/issues/7591): pylint shouldn't complain anymore about unimplemented abstract methods when inheriting from File (non-python tests).

- [#7628](https://togithub.com/pytest-dev/pytest/issues/7628): Fixed test collection when a full path without a drive letter was passed to pytest on Windows (for example `\projects\tests\test.py` instead of `c:\projects\tests\pytest.py`).

- [#7638](https://togithub.com/pytest-dev/pytest/issues/7638): Fix handling of command-line options that appear as paths but trigger an OS-level syntax error on Windows, such as the options used internally by `pytest-xdist`.

- [#7742](https://togithub.com/pytest-dev/pytest/issues/7742): Fixed INTERNALERROR when accessing locals/globals with faulty `exec`.

## Improved Documentation

- [#1477](https://togithub.com/pytest-dev/pytest/issues/1477): Removed faq.rst and its reference in contents.rst.

## Trivial/Internal Changes

- [#7536](https://togithub.com/pytest-dev/pytest/issues/7536): The internal `junitxml` plugin has been rewritten to use `xml.etree.ElementTree`. The order of attributes in XML elements might differ. Some unneeded escaping is no longer performed.

- [#7587](https://togithub.com/pytest-dev/pytest/issues/7587): The dependency on the `more-itertools` package has been removed.

- [#7631](https://togithub.com/pytest-dev/pytest/issues/7631): The result type of `capfd.readouterr()` (`_pytest.capture.CaptureFixture.readouterr`) (and similar) is no longer a namedtuple, but should behave like one in all respects. This was done for technical reasons.

- [#7671](https://togithub.com/pytest-dev/pytest/issues/7671): When collecting tests, pytest finds test classes and functions by examining the attributes of python objects (modules, classes and instances). To speed up this process, pytest now ignores builtin attributes (like `__class__`, `__delattr__` and `__new__`) without consulting the python_classes and python_functions configuration options and without passing them to plugins using the `pytest_pycollect_makeitem` (`_pytest.hookspec.pytest_pycollect_makeitem`) hook.

### [`v6.0.2`](https://togithub.com/pytest-dev/pytest/releases/6.0.2) [Compare Source](https://togithub.com/pytest-dev/pytest/compare/6.0.1...6.0.2)

# pytest 6.0.2 (2020-09-04)

## Bug Fixes

- [#7148](https://togithub.com/pytest-dev/pytest/issues/7148): Fixed `--log-cli` potentially causing unrelated `print` output to be swallowed.
- [#7672](https://togithub.com/pytest-dev/pytest/issues/7672): Fixed log-capturing level restored incorrectly if `caplog.set_level` is called more than once.
- [#7686](https://togithub.com/pytest-dev/pytest/issues/7686): Fixed `NotSetType.token` being used as the parameter ID when the parametrization list is empty. Regressed in pytest 6.0.0.
- [#7707](https://togithub.com/pytest-dev/pytest/issues/7707): Fix internal error when handling some exceptions that contain multiple lines or the style uses multiple lines (`--tb=line` for example).
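Among the 6.2.0 features above, the directly-constructible `pytest.MonkeyPatch` is the easiest to sketch. A minimal example, assuming pytest >= 6.2 and that `APP_HOME` (a hypothetical environment variable used purely for illustration) is not otherwise set:

```python
import os

import pytest


def read_config_home() -> str:
    # MonkeyPatch.context() undoes the patch on exit, which matters when
    # constructing MonkeyPatch directly rather than using the fixture.
    with pytest.MonkeyPatch.context() as mp:
        mp.setenv("APP_HOME", "/tmp/example")  # hypothetical variable
        return os.environ["APP_HOME"]


assert read_config_home() == "/tmp/example"
assert "APP_HOME" not in os.environ  # restored once the context exits
```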
---
 language/snippets/api/requirements-test.txt                  | 2 +-
 language/snippets/classify_text/requirements-test.txt        | 2 +-
 language/snippets/cloud-client/v1/requirements-test.txt      | 2 +-
 language/snippets/generated-samples/v1/requirements-test.txt | 2 +-
 language/snippets/sentiment/requirements-test.txt            | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/language/snippets/api/requirements-test.txt b/language/snippets/api/requirements-test.txt
index 7e460c8c866e..95ea1e6a02b0 100644
--- a/language/snippets/api/requirements-test.txt
+++ b/language/snippets/api/requirements-test.txt
@@ -1 +1 @@
-pytest==6.0.1
+pytest==6.2.4
diff --git a/language/snippets/classify_text/requirements-test.txt b/language/snippets/classify_text/requirements-test.txt
index 7e460c8c866e..95ea1e6a02b0 100644
--- a/language/snippets/classify_text/requirements-test.txt
+++ b/language/snippets/classify_text/requirements-test.txt
@@ -1 +1 @@
-pytest==6.0.1
+pytest==6.2.4
diff --git a/language/snippets/cloud-client/v1/requirements-test.txt b/language/snippets/cloud-client/v1/requirements-test.txt
index 7e460c8c866e..95ea1e6a02b0 100644
--- a/language/snippets/cloud-client/v1/requirements-test.txt
+++ b/language/snippets/cloud-client/v1/requirements-test.txt
@@ -1 +1 @@
-pytest==6.0.1
+pytest==6.2.4
diff --git a/language/snippets/generated-samples/v1/requirements-test.txt b/language/snippets/generated-samples/v1/requirements-test.txt
index 7e460c8c866e..95ea1e6a02b0 100644
--- a/language/snippets/generated-samples/v1/requirements-test.txt
+++ b/language/snippets/generated-samples/v1/requirements-test.txt
@@ -1 +1 @@
-pytest==6.0.1
+pytest==6.2.4
diff --git a/language/snippets/sentiment/requirements-test.txt b/language/snippets/sentiment/requirements-test.txt
index 7e460c8c866e..95ea1e6a02b0 100644
--- a/language/snippets/sentiment/requirements-test.txt
+++ b/language/snippets/sentiment/requirements-test.txt
@@ -1 +1 @@
-pytest==6.0.1
+pytest==6.2.4

From 94c413609d47d105e5d5ce9398b26ab421ca2e1a Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Thu, 20 May 2021 19:44:02 +0200
Subject: [PATCH 209/323] chore(deps): update dependency google-auth to v1.30.0 (#79)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

[![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com)

This PR contains the following updates:

| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
| [google-auth](https://togithub.com/googleapis/google-auth-library-python) | `==1.27.0` -> `==1.30.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-auth/1.30.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-auth/1.30.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-auth/1.30.0/compatibility-slim/1.27.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-auth/1.30.0/confidence-slim/1.27.0)](https://docs.renovatebot.com/merge-confidence/) |

---

### Release Notes

<details>
<summary>googleapis/google-auth-library-python</summary>

### [`v1.30.0`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#1300-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1290v1300-2021-04-23)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.29.0...v1.30.0)

##### Features

- add reauth support to async user credentials for gcloud ([#738](https://www.github.com/googleapis/google-auth-library-python/issues/738)) ([9e10823](https://www.github.com/googleapis/google-auth-library-python/commit/9e1082366d113286bc063051fd76b4799791d943)). This internal feature is for gcloud developers only.

### [`v1.29.0`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#1290-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1281v1290-2021-04-15)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.28.1...v1.29.0)

##### Features

- add reauth feature to user credentials for gcloud ([#727](https://www.github.com/googleapis/google-auth-library-python/issues/727)) ([82293fe](https://www.github.com/googleapis/google-auth-library-python/commit/82293fe2caaf5258babb5df1cff0a5ddc9e44b38)). This internal feature is for gcloud developers only.

##### Bug Fixes

- Allow multiple audiences for id_token.verify_token ([#733](https://www.github.com/googleapis/google-auth-library-python/issues/733)) ([56c3946](https://www.github.com/googleapis/google-auth-library-python/commit/56c394680ac6dfc07c611a9eb1e030e32edd4fe1))

##### [1.28.1](https://www.github.com/googleapis/google-auth-library-python/compare/v1.28.0...v1.28.1) (2021-04-08)

##### Bug Fixes

- support custom alg in jwt header for signing ([#729](https://www.github.com/googleapis/google-auth-library-python/issues/729)) ([0a83706](https://www.github.com/googleapis/google-auth-library-python/commit/0a83706c9d65f7d5a30ea3b42c5beac269ed2a25))

### [`v1.28.1`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#1281-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1280v1281-2021-04-08)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.28.0...v1.28.1)

### [`v1.28.0`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#1280-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1271v1280-2021-03-16)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.27.1...v1.28.0)

##### Features

- allow the AWS_DEFAULT_REGION environment variable ([#721](https://www.github.com/googleapis/google-auth-library-python/issues/721)) ([199da47](https://www.github.com/googleapis/google-auth-library-python/commit/199da4781029916dc075738ec7bd173bd89abe54))
- expose library version at `google.auth.__version__` ([#683](https://www.github.com/googleapis/google-auth-library-python/issues/683)) ([a2cbc32](https://www.github.com/googleapis/google-auth-library-python/commit/a2cbc3245460e1ae1d310de6a2a4007d5a3a06b7))

##### Bug Fixes

- fix unit tests so they can work in g3 ([#714](https://www.github.com/googleapis/google-auth-library-python/issues/714)) ([d80c85f](https://www.github.com/googleapis/google-auth-library-python/commit/d80c85f285ae1a44ddc5a5d94a66e065a79f6d19))

##### [1.27.1](https://www.github.com/googleapis/google-auth-library-python/compare/v1.27.0...v1.27.1) (2021-02-26)

##### Bug Fixes

- ignore gcloud warning when getting project id ([#708](https://www.github.com/googleapis/google-auth-library-python/issues/708)) ([3f2f3ea](https://www.github.com/googleapis/google-auth-library-python/commit/3f2f3eaf09006d3d0ec9c030d359114238479279))
- use gcloud creds flow ([#705](https://www.github.com/googleapis/google-auth-library-python/issues/705)) ([333cb76](https://www.github.com/googleapis/google-auth-library-python/commit/333cb765b52028329ec3ca04edf32c5764b1db68))

### [`v1.27.1`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#1271-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1270v1271-2021-02-26)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.27.0...v1.27.1)

</details>
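Of the changes above, the multiple-audience support for `id_token.verify_token` (added in v1.29.0) is the most user-visible. A minimal, hedged sketch of what it permits; the token string and client IDs below are placeholders, not values from this repository:

```python
# Sketch of google-auth >= 1.29.0 behavior: id_token.verify_token accepts
# a list of audiences instead of a single string. Token and client IDs
# here are placeholders.
from google.auth.transport import requests
from google.oauth2 import id_token

claims = id_token.verify_token(
    "<raw-id-token>",
    requests.Request(),
    audience=[
        "client-id-a.apps.googleusercontent.com",
        "client-id-b.apps.googleusercontent.com",
    ],
)
print(claims.get("email"))
```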
---
 language/snippets/api/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt
index 4574239f9eab..c2320332279a 100644
--- a/language/snippets/api/requirements.txt
+++ b/language/snippets/api/requirements.txt
@@ -1,3 +1,3 @@
 google-api-python-client==1.12.8
-google-auth==1.27.0
+google-auth==1.30.0
 google-auth-httplib2==0.1.0

From dfdb217356fc3df8979986199fe8e4b1bc9672e6 Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Thu, 20 May 2021 21:18:04 +0200
Subject: [PATCH 210/323] chore(deps): update dependency google-api-python-client to v2 (#81)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

[![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com)

This PR contains the following updates:

| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
| [google-api-python-client](https://togithub.com/googleapis/google-api-python-client) | `==1.12.8` -> `==2.5.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.5.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.5.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.5.0/compatibility-slim/1.12.8)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.5.0/confidence-slim/1.12.8)](https://docs.renovatebot.com/merge-confidence/) |

---

### Release Notes

<details>
<summary>googleapis/google-api-python-client</summary>

### [`v2.5.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#250-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev240v250-2021-05-20)

[Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.4.0...v2.5.0)

##### Features

- **adexchangebuyer:** update the api [`46d87cb`](https://togithub.com/googleapis/google-api-python-client/commit/46d87cb3e1f85ec9201134402b3c3afd2eb55770) ([7700bbf](https://www.github.com/googleapis/google-api-python-client/commit/7700bbffda386345cc4426ef413fc643f6368ef4))
- **analyticsadmin:** update the api [`9648bae`](https://togithub.com/googleapis/google-api-python-client/commit/9648bae09873a132e7b4627096c153043911be6e) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **analyticsadmin:** update the api [`adaafff`](https://togithub.com/googleapis/google-api-python-client/commit/adaafffbdeab31f05f9ad62d0f58846313bb3858) ([7700bbf](https://www.github.com/googleapis/google-api-python-client/commit/7700bbffda386345cc4426ef413fc643f6368ef4))
- **artifactregistry:** update the api [`7dd722f`](https://togithub.com/googleapis/google-api-python-client/commit/7dd722fe8b0ae822f4847219c442aa67a1aae7fd) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **assuredworkloads:** update the api [`9b84ffc`](https://togithub.com/googleapis/google-api-python-client/commit/9b84ffce415133e860cc55bfbd3b9c15c3d46a24) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **cloudasset:** update the api [`a8228db`](https://togithub.com/googleapis/google-api-python-client/commit/a8228db5ef31724493f0f62bf8062aca9adc44aa) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **cloudbuild:** update the api [`c9d8208`](https://togithub.com/googleapis/google-api-python-client/commit/c9d8208c0f9579d958224566af369b809e13016a) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **compute:** update the api [`685c19d`](https://togithub.com/googleapis/google-api-python-client/commit/685c19d4b5262d27a2b1016e01186188afe610fd) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **container:** update the api [`c5cd244`](https://togithub.com/googleapis/google-api-python-client/commit/c5cd244f996b1dfb605ef28eb22f8b0e76bffa1b) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **content:** update the api [`3b3e9be`](https://togithub.com/googleapis/google-api-python-client/commit/3b3e9be7e17c4efa89b45ac671a7c7f627a34cd7) ([7700bbf](https://www.github.com/googleapis/google-api-python-client/commit/7700bbffda386345cc4426ef413fc643f6368ef4))
- **dialogflow:** update the api [`0c6b31f`](https://togithub.com/googleapis/google-api-python-client/commit/0c6b31fd2deb75ca1c023fed36903b638f5e74f8) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **dialogflow:** update the api [`254b941`](https://togithub.com/googleapis/google-api-python-client/commit/254b9413a2ede306917031a2117f7af2df28a103) ([7700bbf](https://www.github.com/googleapis/google-api-python-client/commit/7700bbffda386345cc4426ef413fc643f6368ef4))
- **documentai:** update the api [`6dcec9f`](https://togithub.com/googleapis/google-api-python-client/commit/6dcec9fd8c0f803d37b4c8355870208e5a8c61ce) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **drive:** update the api [`8788823`](https://togithub.com/googleapis/google-api-python-client/commit/8788823461610f31eebd655915e07def9690da48) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **genomics:** update the api [`d0e6cc4`](https://togithub.com/googleapis/google-api-python-client/commit/d0e6cc48df2d0a00d91ce6fbab83aa82146f3573) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **logging:** update the api [`7f5fa16`](https://togithub.com/googleapis/google-api-python-client/commit/7f5fa161fd3db9ca6f2df23f5c8bd41ba01e9b9c) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **manufacturers:** update the api [`25bf19f`](https://togithub.com/googleapis/google-api-python-client/commit/25bf19f14a09428ab3fc6e51b0f6812867f99b04) ([7700bbf](https://www.github.com/googleapis/google-api-python-client/commit/7700bbffda386345cc4426ef413fc643f6368ef4))
- **privateca:** update the api [`0a5c31d`](https://togithub.com/googleapis/google-api-python-client/commit/0a5c31d74f788444640c174c413b12d494a00f1a) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **prod_tt_sasportal:** update the api [`af243b5`](https://togithub.com/googleapis/google-api-python-client/commit/af243b57a7039f4e01259fb085c7b07a66106fcf) ([7700bbf](https://www.github.com/googleapis/google-api-python-client/commit/7700bbffda386345cc4426ef413fc643f6368ef4))
- **pubsublite:** update the api [`dd67e9b`](https://togithub.com/googleapis/google-api-python-client/commit/dd67e9b117fdc8d0d0ecff6ade657003a95c12f7) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **recommender:** update the api [`4b261d9`](https://togithub.com/googleapis/google-api-python-client/commit/4b261d97bea2a8bc042a274c2d904be09da2d82c) ([c2cd326](https://www.github.com/googleapis/google-api-python-client/commit/c2cd326ef156fc2652d23e4c64fd06e2d66e3a80))
- **redis:** update the api [`5228389`](https://togithub.com/googleapis/google-api-python-client/commit/5228389cbd5fceb1bf8c2d36086faa147d91e50f) ([7700bbf](https://www.github.com/googleapis/google-api-python-client/commit/7700bbffda386345cc4426ef413fc643f6368ef4))
- **remotebuildexecution:** update the api [`7c8b314`](https://togithub.com/googleapis/google-api-python-client/commit/7c8b314e5508dda81cfb673039ea032f593fa97d) ([7700bbf](https://www.github.com/googleapis/google-api-python-client/commit/7700bbffda386345cc4426ef413fc643f6368ef4))

### [`v2.4.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#240-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev230v240-2021-05-11)

[Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.3.0...v2.4.0)

##### Features

- **alertcenter:** update the api ([cbf5364](https://www.github.com/googleapis/google-api-python-client/commit/cbf5364f32932e6dc0baebfb3787a9f2fc889819))
- **analyticsadmin:** update the api ([bfa2f1c](https://www.github.com/googleapis/google-api-python-client/commit/bfa2f1caee54b6f6bc8760a1d20e7014e607bd7f))
- **androidenterprise:** update the api ([44a6719](https://www.github.com/googleapis/google-api-python-client/commit/44a6719b9f0024df4f4a4640743015507dbd0e94))
- **androidpublisher:** update the api ([44a6719](https://www.github.com/googleapis/google-api-python-client/commit/44a6719b9f0024df4f4a4640743015507dbd0e94))
- **artifactregistry:** update the api ([44a6719](https://www.github.com/googleapis/google-api-python-client/commit/44a6719b9f0024df4f4a4640743015507dbd0e94))
- **bigquery:** update the api ([bfa2f1c](https://www.github.com/googleapis/google-api-python-client/commit/bfa2f1caee54b6f6bc8760a1d20e7014e607bd7f))
- **chromepolicy:** update the api ([44a6719](https://www.github.com/googleapis/google-api-python-client/commit/44a6719b9f0024df4f4a4640743015507dbd0e94))
- **content:** update the api ([c0b883a](https://www.github.com/googleapis/google-api-python-client/commit/c0b883a43d90c27153eb1d205d52cd5d8b66c39a))
- **datacatalog:** update the api ([e58efe8](https://www.github.com/googleapis/google-api-python-client/commit/e58efe85e5988c93399dd3cf5290620d67baf038))
- **dataproc:** update the api ([cbf5364](https://www.github.com/googleapis/google-api-python-client/commit/cbf5364f32932e6dc0baebfb3787a9f2fc889819))
- **dialogflow:** update the api ([44a6719](https://www.github.com/googleapis/google-api-python-client/commit/44a6719b9f0024df4f4a4640743015507dbd0e94))
- **dns:** update the api ([c0b883a](https://www.github.com/googleapis/google-api-python-client/commit/c0b883a43d90c27153eb1d205d52cd5d8b66c39a))
- **documentai:** update the api ([bfa2f1c](https://www.github.com/googleapis/google-api-python-client/commit/bfa2f1caee54b6f6bc8760a1d20e7014e607bd7f))
- **file:** update the api ([cbf5364](https://www.github.com/googleapis/google-api-python-client/commit/cbf5364f32932e6dc0baebfb3787a9f2fc889819))
- **file:** update the api ([44a6719](https://www.github.com/googleapis/google-api-python-client/commit/44a6719b9f0024df4f4a4640743015507dbd0e94))
- **firebasestorage:** update the api ([27f691d](https://www.github.com/googleapis/google-api-python-client/commit/27f691d2f256447a41f44c77175edd0f37dddbdc))
- **gameservices:** update the api ([bfa2f1c](https://www.github.com/googleapis/google-api-python-client/commit/bfa2f1caee54b6f6bc8760a1d20e7014e607bd7f))
- **gkehub:** update the api ([44a6719](https://www.github.com/googleapis/google-api-python-client/commit/44a6719b9f0024df4f4a4640743015507dbd0e94))
- **lifesciences:** update the api ([44a6719](https://www.github.com/googleapis/google-api-python-client/commit/44a6719b9f0024df4f4a4640743015507dbd0e94))
- **monitoring:** update the api ([bfa2f1c](https://www.github.com/googleapis/google-api-python-client/commit/bfa2f1caee54b6f6bc8760a1d20e7014e607bd7f))
- **mybusinessaccountmanagement:** update the api ([bfa2f1c](https://www.github.com/googleapis/google-api-python-client/commit/bfa2f1caee54b6f6bc8760a1d20e7014e607bd7f))
- **networkmanagement:** update the api ([bfa2f1c](https://www.github.com/googleapis/google-api-python-client/commit/bfa2f1caee54b6f6bc8760a1d20e7014e607bd7f))
- **oslogin:** update the api ([bfa2f1c](https://www.github.com/googleapis/google-api-python-client/commit/bfa2f1caee54b6f6bc8760a1d20e7014e607bd7f))
- **pubsublite:** update the api ([bfa2f1c](https://www.github.com/googleapis/google-api-python-client/commit/bfa2f1caee54b6f6bc8760a1d20e7014e607bd7f))
- **recommender:** update the api ([bfa2f1c](https://www.github.com/googleapis/google-api-python-client/commit/bfa2f1caee54b6f6bc8760a1d20e7014e607bd7f))
- **retail:** update the api ([cbf5364](https://www.github.com/googleapis/google-api-python-client/commit/cbf5364f32932e6dc0baebfb3787a9f2fc889819))
- **servicedirectory:** update the api ([44a6719](https://www.github.com/googleapis/google-api-python-client/commit/44a6719b9f0024df4f4a4640743015507dbd0e94))
- **servicemanagement:** update the api ([c0b883a](https://www.github.com/googleapis/google-api-python-client/commit/c0b883a43d90c27153eb1d205d52cd5d8b66c39a))
- **servicenetworking:** update the api ([bfa2f1c](https://www.github.com/googleapis/google-api-python-client/commit/bfa2f1caee54b6f6bc8760a1d20e7014e607bd7f))
- **translate:** update the api ([c0b883a](https://www.github.com/googleapis/google-api-python-client/commit/c0b883a43d90c27153eb1d205d52cd5d8b66c39a))

##### Bug Fixes

- preventing accessing predefined discovery URLs when override is provided ([#1324](https://www.github.com/googleapis/google-api-python-client/issues/1324)) ([1c4d199](https://www.github.com/googleapis/google-api-python-client/commit/1c4d1998086d89238ca5d961bc1c8eee5685345c))

### [`v2.3.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#230-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev220v230-2021-04-28)

[Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.2.0...v2.3.0)

##### Features

- **apigee:** update the api ([3fd11cb](https://www.github.com/googleapis/google-api-python-client/commit/3fd11cbfa43679d14be7f09d9cb071d82d156ffa))
- **dataflow:** update the api ([3fd11cb](https://www.github.com/googleapis/google-api-python-client/commit/3fd11cbfa43679d14be7f09d9cb071d82d156ffa))
- **dialogflow:** update the api ([3fd11cb](https://www.github.com/googleapis/google-api-python-client/commit/3fd11cbfa43679d14be7f09d9cb071d82d156ffa))
- **documentai:** update the api ([3fd11cb](https://www.github.com/googleapis/google-api-python-client/commit/3fd11cbfa43679d14be7f09d9cb071d82d156ffa))
- **healthcare:** update the api ([3fd11cb](https://www.github.com/googleapis/google-api-python-client/commit/3fd11cbfa43679d14be7f09d9cb071d82d156ffa))
- **osconfig:** update the api ([afea316](https://www.github.com/googleapis/google-api-python-client/commit/afea316d32842ecb9e7d626842d5926b0bf3e34f))
- **sqladmin:** update the api ([cec4393](https://www.github.com/googleapis/google-api-python-client/commit/cec4393b8e37e229f68b2233a2041db062c2a335))

### [`v2.2.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#220-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev210v220-2021-04-13)

[Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.1.0...v2.2.0)

##### Features

- Adds support for errors.py to also use 'errors' for error_details ([#1281](https://www.github.com/googleapis/google-api-python-client/issues/1281)) ([a5d2081](https://www.github.com/googleapis/google-api-python-client/commit/a5d20813e8d7589b0cec030c149748e53ea555a5))

### [`v2.1.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#210-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev202v210-2021-03-31)

[Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.0.2...v2.1.0)

##### Features

- add status_code property on http error handling ([#1185](https://www.github.com/googleapis/google-api-python-client/issues/1185)) ([db2a766](https://www.github.com/googleapis/google-api-python-client/commit/db2a766bbd976742f6ef10d721d8423c8ac9246d))

##### Bug Fixes

- Change default of `static_discovery` when `discoveryServiceUrl` set ([#1261](https://www.github.com/googleapis/google-api-python-client/issues/1261)) ([3b4f2e2](https://www.github.com/googleapis/google-api-python-client/commit/3b4f2e243709132b5ca41a3c23853d5067dfb0ab))
- correct api version in oauth-installed.md ([#1258](https://www.github.com/googleapis/google-api-python-client/issues/1258)) ([d1a255f](https://www.github.com/googleapis/google-api-python-client/commit/d1a255fcbeaa36f615cede720692fea2b9f894db))
- fix .close() ([#1231](https://www.github.com/googleapis/google-api-python-client/issues/1231)) ([a9583f7](https://www.github.com/googleapis/google-api-python-client/commit/a9583f712d13c67aa282d14cd30e00999b530d7c))
- Resolve issue where num_retries would have no effect ([#1244](https://www.github.com/googleapis/google-api-python-client/issues/1244)) ([c518472](https://www.github.com/googleapis/google-api-python-client/commit/c518472e836c32ba2ff5e8480ab5a7643f722d46))

##### Documentation

- Distinguish between public/private docs in 2.0 guide ([#1226](https://www.github.com/googleapis/google-api-python-client/issues/1226)) ([a6f1706](https://www.github.com/googleapis/google-api-python-client/commit/a6f17066caf6e911b7e94e8feab52fa3af2def1b))
- Update README to promote cloud client libraries ([#1252](https://www.github.com/googleapis/google-api-python-client/issues/1252)) ([22807c9](https://www.github.com/googleapis/google-api-python-client/commit/22807c92ce754ff3d60f240ec5c38de50c5b654b))

##### [2.0.2](https://www.github.com/googleapis/google-api-python-client/compare/v2.0.1...v2.0.2) (2021-03-04)

##### Bug Fixes

- Include discovery artifacts in published package ([#1221](https://www.github.com/googleapis/google-api-python-client/issues/1221)) ([ad618d0](https://www.github.com/googleapis/google-api-python-client/commit/ad618d0b266b86a795871d946367552905f4ccb6))

##### [2.0.1](https://www.github.com/googleapis/google-api-python-client/compare/v2.0.0...v2.0.1) (2021-03-04)

##### Bug Fixes

- add static discovery docs ([#1216](https://www.github.com/googleapis/google-api-python-client/issues/1216)) ([b5d33d6](https://www.github.com/googleapis/google-api-python-client/commit/b5d33d6d520ca9589eefd08d34fe96844f420bce))

##### Documentation

- add a link to the migration guide in the changelog ([#1213](https://www.github.com/googleapis/google-api-python-client/issues/1213)) ([b85da5b](https://www.github.com/googleapis/google-api-python-client/commit/b85da5bb7d6d6da60ff611221d3c4719eadb478a))

### [`v2.0.2`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#202-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev201v202-2021-03-04)

[Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.0.1...v2.0.2)

### [`v2.0.1`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#201-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev200v201-2021-03-04)

[Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.0.0...v2.0.1)

### [`v2.0.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#200-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev1128v200-2021-03-03)

[Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v1.12.8...v2.0.0)

##### ⚠ BREAKING CHANGES

The 2.0 release of `google-api-python-client` is a significant upgrade compared to v1. Please see the [Migration Guide](UPGRADING.md) for more information.

- **deps:** require 3.6+. ([#961](https://togithub.com/googleapis/google-api-python-client/issues/961))

##### Features

- Add support for using static discovery documents ([#1109](https://www.github.com/googleapis/google-api-python-client/issues/1109)) ([32d1c59](https://www.github.com/googleapis/google-api-python-client/commit/32d1c597b364e2641eca33ccf6df802bb218eea1))
- Update synth.py to copy discovery files from discovery-artifact-manager ([#1104](https://www.github.com/googleapis/google-api-python-client/issues/1104)) ([af918e8](https://www.github.com/googleapis/google-api-python-client/commit/af918e8ef422438aaca0c468de8b3b2c184d884e))

##### Bug Fixes

- Catch ECONNRESET and other errors more reliably ([#1147](https://www.github.com/googleapis/google-api-python-client/issues/1147)) ([ae9cd99](https://www.github.com/googleapis/google-api-python-client/commit/ae9cd99134160a5540e6f8d6d33d855122854e10))
- **deps:** add upper-bound google-auth dependency ([#1180](https://www.github.com/googleapis/google-api-python-client/issues/1180)) ([c687f42](https://www.github.com/googleapis/google-api-python-client/commit/c687f4207b9c574e539a7eab75201a58f2e91f35))
- handle error on service not enabled ([#1117](https://www.github.com/googleapis/google-api-python-client/issues/1117)) ([c691283](https://www.github.com/googleapis/google-api-python-client/commit/c6912836e88eea45aef7d515383e549082d37717))
- Improve support for error_details ([#1126](https://www.github.com/googleapis/google-api-python-client/issues/1126)) ([e6a1da3](https://www.github.com/googleapis/google-api-python-client/commit/e6a1da3542e230e5287863f339ce1d28292cd92f))
- MediaFileUpload error if file does not exist ([#1127](https://www.github.com/googleapis/google-api-python-client/issues/1127)) ([2c6d029](https://www.github.com/googleapis/google-api-python-client/commit/2c6d0297851c806ef850ca23686c51ca5878ac48))
- replace deprecated socket.error with OSError ([#1161](https://www.github.com/googleapis/google-api-python-client/issues/1161)) ([b7b9986](https://www.github.com/googleapis/google-api-python-client/commit/b7b9986fe13c483eeefb77673b4091911978ee46))
- Use logging level info when file_cache is not available ([#1125](https://www.github.com/googleapis/google-api-python-client/issues/1125)) ([0b32e69](https://www.github.com/googleapis/google-api-python-client/commit/0b32e69900eafec2cd1197ba054d4f9a765a3f29))

##### Miscellaneous Chores

- **deps:** require 3.6+ ([#961](https://www.github.com/googleapis/google-api-python-client/issues/961)) ([8325d24](https://www.github.com/googleapis/google-api-python-client/commit/8325d24acaa2b2077acaaea26ea5fafb6dd856c5))

##### Documentation

- add networkconnectivity v1alpha1 ([#1176](https://www.github.com/googleapis/google-api-python-client/issues/1176)) ([91b61d3](https://www.github.com/googleapis/google-api-python-client/commit/91b61d3272de9b5aebad0cf1eb76ca53c24f22f9))
- Delete redundant oauth-web.md ([#1142](https://www.github.com/googleapis/google-api-python-client/issues/1142)) ([70bc6c9](https://www.github.com/googleapis/google-api-python-client/commit/70bc6c9db99eed5af7536b87448bd9323db9320b))
- fix MediaIoBaseUpload broken link ([#1112](https://www.github.com/googleapis/google-api-python-client/issues/1112)) ([334b6e6](https://www.github.com/googleapis/google-api-python-client/commit/334b6e6d9e4924398e57bad2e53747584abf8cf4))
- fix regression with incorrect args order in docs ([#1141](https://www.github.com/googleapis/google-api-python-client/issues/1141)) ([4249a7b](https://www.github.com/googleapis/google-api-python-client/commit/4249a7b92e891d1ecaf93944ca9c062ffbd54f77))
- fix typo in thread safety example code ([#1100](https://www.github.com/googleapis/google-api-python-client/issues/1100)) ([5ae088d](https://www.github.com/googleapis/google-api-python-client/commit/5ae088dc027b89517b896a89a0aeb2ca80f492cf))
- Reduce noisy changes in docs regen ([#1135](https://www.github.com/googleapis/google-api-python-client/issues/1135)) ([b1b0c83](https://www.github.com/googleapis/google-api-python-client/commit/b1b0c83ae0737e7b63cb77e4e7757213a216b88e))
- update docs/dyn ([#1096](https://www.github.com/googleapis/google-api-python-client/issues/1096)) ([c2228be](https://www.github.com/googleapis/google-api-python-client/commit/c2228be4630e279e02a25b51566a0f93b67aa499))
- update guidance on service accounts ([#1120](https://www.github.com/googleapis/google-api-python-client/issues/1120)) ([b2ea122](https://www.github.com/googleapis/google-api-python-client/commit/b2ea122c40ccac09c9e7b0b29f6b2bcca6db107b))

##### [1.12.8](https://www.github.com/googleapis/google-api-python-client/compare/v1.12.7...v1.12.8) (2020-11-18)

##### Documentation

- add httplib2 authorization to thread_safety ([#1005](https://www.github.com/googleapis/google-api-python-client/issues/1005)) ([205ae59](https://www.github.com/googleapis/google-api-python-client/commit/205ae5988bd89676823088d6c8a7bd17e3beefcf)), closes [#808](https://www.github.com/googleapis/google-api-python-client/issues/808) [#808](https://www.github.com/googleapis/google-api-python-client/issues/808)

##### [1.12.7](https://www.github.com/googleapis/google-api-python-client/compare/v1.12.6...v1.12.7) (2020-11-17)

##### Documentation

- Update Webmasters API sample ([#1092](https://www.github.com/googleapis/google-api-python-client/issues/1092)) ([12831f3](https://www.github.com/googleapis/google-api-python-client/commit/12831f3e4716292b55b63dd2b08c3351f09b8a15))

##### [1.12.6](https://www.github.com/googleapis/google-api-python-client/compare/v1.12.5...v1.12.6) (2020-11-16)

##### Documentation

- Change error parsing to check for 'message' ([#1083](https://www.github.com/googleapis/google-api-python-client/issues/1083)) ([a341c5a](https://www.github.com/googleapis/google-api-python-client/commit/a341c5a5e31ba16da109658127b58cb7e5dbeedd)), closes [#1082](https://www.github.com/googleapis/google-api-python-client/issues/1082)
- Update oauth docs to include snippet to get email address of authenticated user ([#1088](https://www.github.com/googleapis/google-api-python-client/issues/1088)) ([25fba64](https://www.github.com/googleapis/google-api-python-client/commit/25fba648ea647b62f2a6edc54ae927c1ed381b45)), closes [#1071](https://www.github.com/googleapis/google-api-python-client/issues/1071)

##### [1.12.5](https://www.github.com/googleapis/google-api-python-client/compare/v1.12.4...v1.12.5) (2020-10-22)

##### Bug Fixes

- don't raise when downloading zero byte files ([#1074](https://www.github.com/googleapis/google-api-python-client/issues/1074)) ([86d8788](https://www.github.com/googleapis/google-api-python-client/commit/86d8788ee8a766ca6818620f3fd2899be0e44190))

##### [1.12.4](https://www.github.com/googleapis/google-api-python-client/compare/v1.12.3...v1.12.4) (2020-10-20)

##### Bug Fixes

- don't set content-range on empty uploads ([#1070](https://www.github.com/googleapis/google-api-python-client/issues/1070)) ([af6035f](https://www.github.com/googleapis/google-api-python-client/commit/af6035f6754a155ee6b04bbbc5c39410c7316d6a))

##### Documentation

- fix typo in oauth.md ([#1058](https://www.github.com/googleapis/google-api-python-client/issues/1058)) ([30eff9d](https://www.github.com/googleapis/google-api-python-client/commit/30eff9d8276919b8c4e50df2d3b1982594423692))
- update generated docs ([#1053](https://www.github.com/googleapis/google-api-python-client/issues/1053)) ([3e17f89](https://www.github.com/googleapis/google-api-python-client/commit/3e17f8990db54bec16c48c319072799a14f5a53f)), closes [#1049](https://www.github.com/googleapis/google-api-python-client/issues/1049)

##### [1.12.3](https://www.github.com/googleapis/google-api-python-client/compare/v1.12.2...v1.12.3) (2020-09-29)

##### Bug Fixes

- **deps:** update setup.py to install httplib2>=0.15.0 ([#1050](https://www.github.com/googleapis/google-api-python-client/issues/1050)) ([c00f70d](https://www.github.com/googleapis/google-api-python-client/commit/c00f70d565a002b92374356be087927b131ce135))

##### [1.12.2](https://www.github.com/googleapis/google-api-python-client/compare/v1.12.1...v1.12.2) (2020-09-23)

##### Bug Fixes

- add method to close httplib2 connections ([#1038](https://www.github.com/googleapis/google-api-python-client/issues/1038)) ([98888da](https://www.github.com/googleapis/google-api-python-client/commit/98888dadf04e7e00524b6de273d28d02d7abc2c0)), closes [#618](https://www.github.com/googleapis/google-api-python-client/issues/618)

##### [1.12.1](https://www.github.com/googleapis/google-api-python-client/compare/v1.12.0...v1.12.1) (2020-09-14)

##### Bug Fixes

- **deps:** require six>=1.13.0 ([#1030](https://www.github.com/googleapis/google-api-python-client/issues/1030)) ([4acecc3](https://www.github.com/googleapis/google-api-python-client/commit/4acecc3c0cd31308f9a256f065b7b1d1c3a4798d))

</details>
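Because this bump crosses the 1.x to 2.x boundary, a few items above change day-to-day usage: static discovery documents, the `close()` method, and the `num_retries` fix. A hedged sketch against the Natural Language REST API (the same API these snippets call); it assumes Application Default Credentials are configured and is not code from this repository:

```python
# Hedged sketch of google-api-python-client 2.x usage touched on above:
# build() can serve the language v1 API from a bundled static discovery
# document, execute(num_retries=...) works again as of 2.1.0, and the
# service object exposes close() to release its httplib2 connections.
from googleapiclient.discovery import build

service = build("language", "v1", static_discovery=True)
try:
    body = {"document": {"type": "PLAIN_TEXT", "content": "Hello, world!"}}
    response = (
        service.documents().analyzeSentiment(body=body).execute(num_retries=3)
    )
    print(response["documentSentiment"])
finally:
    service.close()
```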
---
 language/snippets/api/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt
index c2320332279a..04d8d0ae1ecc 100644
--- a/language/snippets/api/requirements.txt
+++ b/language/snippets/api/requirements.txt
@@ -1,3 +1,3 @@
-google-api-python-client==1.12.8
+google-api-python-client==2.5.0
 google-auth==1.30.0
 google-auth-httplib2==0.1.0

From d236c1cdbd249a9c8b163c6a2ceb3fe2d0606acb Mon Sep 17 00:00:00 2001
From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com>
Date: Sat, 22 May 2021 09:32:06 +0000
Subject: [PATCH 211/323] chore: new owl bot post processor docker image (#115)

gcr.io/repo-automation-bots/owlbot-python:latest@sha256:3c3a445b3ddc99ccd5d31edc4b4519729635d20693900db32c4f587ed51f7479
---
 language/snippets/api/noxfile.py                  | 8 +++++++-
 language/snippets/classify_text/noxfile.py        | 8 +++++++-
 language/snippets/cloud-client/v1/noxfile.py      | 8 +++++++-
 language/snippets/generated-samples/v1/noxfile.py | 8 +++++++-
 language/snippets/sentiment/noxfile.py            | 8 +++++++-
 5 files changed, 35 insertions(+), 5 deletions(-)

diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py
index 956cdf4f9250..5ff9e1db5808 100644
--- a/language/snippets/api/noxfile.py
+++ b/language/snippets/api/noxfile.py
@@ -50,7 +50,10 @@
     # to use your own Cloud project.
     'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
     # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+    # If you need to use a specific version of pip,
+    # change pip_version_override to the string representation
+    # of the version number, for example, "20.2.4"
+    "pip_version_override": None,
     # A dictionary you want to inject into your test. Don't put any
     # secrets here. These values will override predefined values.
     'envs': {},
@@ -170,6 +173,9 @@ def blacken(session: nox.sessions.Session) -> None:
 def _session_tests(
     session: nox.sessions.Session, post_install: Callable = None
 ) -> None:
+    if TEST_CONFIG["pip_version_override"]:
+        pip_version = TEST_CONFIG["pip_version_override"]
+        session.install(f"pip=={pip_version}")
     """Runs py.test for a particular project."""
     if os.path.exists("requirements.txt"):
         if os.path.exists("constraints.txt"):
diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py
index 956cdf4f9250..5ff9e1db5808 100644
--- a/language/snippets/classify_text/noxfile.py
+++ b/language/snippets/classify_text/noxfile.py
@@ -50,7 +50,10 @@
     # to use your own Cloud project.
     'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
     # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+    # If you need to use a specific version of pip,
+    # change pip_version_override to the string representation
+    # of the version number, for example, "20.2.4"
+    "pip_version_override": None,
     # A dictionary you want to inject into your test. Don't put any
     # secrets here. These values will override predefined values.
     'envs': {},
@@ -170,6 +173,9 @@ def blacken(session: nox.sessions.Session) -> None:
 def _session_tests(
     session: nox.sessions.Session, post_install: Callable = None
 ) -> None:
+    if TEST_CONFIG["pip_version_override"]:
+        pip_version = TEST_CONFIG["pip_version_override"]
+        session.install(f"pip=={pip_version}")
     """Runs py.test for a particular project."""
     if os.path.exists("requirements.txt"):
         if os.path.exists("constraints.txt"):
diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py
index 956cdf4f9250..5ff9e1db5808 100644
--- a/language/snippets/cloud-client/v1/noxfile.py
+++ b/language/snippets/cloud-client/v1/noxfile.py
@@ -50,7 +50,10 @@
     # to use your own Cloud project.
     'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
     # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+    # If you need to use a specific version of pip,
+    # change pip_version_override to the string representation
+    # of the version number, for example, "20.2.4"
+    "pip_version_override": None,
     # A dictionary you want to inject into your test. Don't put any
     # secrets here. These values will override predefined values.
     'envs': {},
@@ -170,6 +173,9 @@ def blacken(session: nox.sessions.Session) -> None:
 def _session_tests(
     session: nox.sessions.Session, post_install: Callable = None
 ) -> None:
+    if TEST_CONFIG["pip_version_override"]:
+        pip_version = TEST_CONFIG["pip_version_override"]
+        session.install(f"pip=={pip_version}")
     """Runs py.test for a particular project."""
     if os.path.exists("requirements.txt"):
         if os.path.exists("constraints.txt"):
diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py
index 956cdf4f9250..5ff9e1db5808 100644
--- a/language/snippets/generated-samples/v1/noxfile.py
+++ b/language/snippets/generated-samples/v1/noxfile.py
@@ -50,7 +50,10 @@
     # to use your own Cloud project.
     'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
     # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+    # If you need to use a specific version of pip,
+    # change pip_version_override to the string representation
+    # of the version number, for example, "20.2.4"
+    "pip_version_override": None,
     # A dictionary you want to inject into your test. Don't put any
     # secrets here. These values will override predefined values.
     'envs': {},
@@ -170,6 +173,9 @@ def blacken(session: nox.sessions.Session) -> None:
 def _session_tests(
     session: nox.sessions.Session, post_install: Callable = None
 ) -> None:
+    if TEST_CONFIG["pip_version_override"]:
+        pip_version = TEST_CONFIG["pip_version_override"]
+        session.install(f"pip=={pip_version}")
     """Runs py.test for a particular project."""
     if os.path.exists("requirements.txt"):
         if os.path.exists("constraints.txt"):
diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py
index 956cdf4f9250..5ff9e1db5808 100644
--- a/language/snippets/sentiment/noxfile.py
+++ b/language/snippets/sentiment/noxfile.py
@@ -50,7 +50,10 @@
    # to use your own Cloud project.
     'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
     # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+    # If you need to use a specific version of pip,
+    # change pip_version_override to the string representation
+    # of the version number, for example, "20.2.4"
+    "pip_version_override": None,
     # A dictionary you want to inject into your test. Don't put any
     # secrets here. These values will override predefined values.
     'envs': {},
@@ -170,6 +173,9 @@ def blacken(session: nox.sessions.Session) -> None:
 def _session_tests(
     session: nox.sessions.Session, post_install: Callable = None
 ) -> None:
+    if TEST_CONFIG["pip_version_override"]:
+        pip_version = TEST_CONFIG["pip_version_override"]
+        session.install(f"pip=={pip_version}")
     """Runs py.test for a particular project."""
    if os.path.exists("requirements.txt"):
         if os.path.exists("constraints.txt"):

From 4e79a6b6393cf02b74977f3cc1c5f0cb5a5df794 Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Tue, 25 May 2021 16:53:06 +0200
Subject: [PATCH 212/323] chore(deps): update dependency google-auth to v1.30.1 (#117)

---
 language/snippets/api/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt
index 04d8d0ae1ecc..eb7666ab4902 100644
--- a/language/snippets/api/requirements.txt
+++ b/language/snippets/api/requirements.txt
@@ -1,3 +1,3 @@
 google-api-python-client==2.5.0
-google-auth==1.30.0
+google-auth==1.30.1
 google-auth-httplib2==0.1.0

From c414299b6545bf4cdc16848370cf544ec3f4f4a9 Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Thu, 27 May 2021 20:03:14 +0200
Subject: [PATCH 213/323] chore(deps): update dependency google-api-python-client to v2.6.0 (#118)

---
 language/snippets/api/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt
index eb7666ab4902..f33cfcc18db1 100644
--- a/language/snippets/api/requirements.txt
+++ b/language/snippets/api/requirements.txt
@@ -1,3 +1,3 @@
-google-api-python-client==2.5.0
+google-api-python-client==2.6.0
 google-auth==1.30.1
 google-auth-httplib2==0.1.0

From 961d6d0963210c1c700c6b39cd55b178f0a47043 Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Fri, 11 Jun 2021 14:36:04 +0200
Subject: [PATCH 214/323] chore(deps): update dependency google-auth to v1.31.0 (#122)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

[![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com)

This PR contains the following updates:

| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
| [google-auth](https://togithub.com/googleapis/google-auth-library-python) | `==1.30.1` -> `==1.31.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-auth/1.31.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-auth/1.31.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-auth/1.31.0/compatibility-slim/1.30.1)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-auth/1.31.0/confidence-slim/1.30.1)](https://docs.renovatebot.com/merge-confidence/) |

---

### Release Notes

<details>
<summary>googleapis/google-auth-library-python</summary>

### [`v1.31.0`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#1310-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1302v1310-2021-06-09)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.30.2...v1.31.0)

##### Features

- define useful properties on `google.auth.external_account.Credentials` ([#770](https://www.github.com/googleapis/google-auth-library-python/issues/770)) ([f97499c](https://www.github.com/googleapis/google-auth-library-python/commit/f97499c718af70d17c17e0c58d6381273eceabcd))

##### Bug Fixes

- avoid deleting items while iterating ([#772](https://www.github.com/googleapis/google-auth-library-python/issues/772)) ([a5e6b65](https://www.github.com/googleapis/google-auth-library-python/commit/a5e6b651aa8ad407ce087fe32f40b46925bae527))

##### [1.30.2](https://www.github.com/googleapis/google-auth-library-python/compare/v1.30.1...v1.30.2) (2021-06-03)

##### Bug Fixes

- **dependencies:** add urllib3 and requests to aiohttp extra ([#755](https://www.github.com/googleapis/google-auth-library-python/issues/755)) ([a923442](https://www.github.com/googleapis/google-auth-library-python/commit/a9234423cb2b69068fc0d30a5a0ee86a599ab8b7))
- enforce constraints during unit tests ([#760](https://www.github.com/googleapis/google-auth-library-python/issues/760)) ([1a6496a](https://www.github.com/googleapis/google-auth-library-python/commit/1a6496abfc17ab781bfa485dc74d0f7dbbe0c44b)), closes [#759](https://www.github.com/googleapis/google-auth-library-python/issues/759)
- session object was never used in aiohttp request ([#700](https://www.github.com/googleapis/google-auth-library-python/issues/700)) ([#701](https://www.github.com/googleapis/google-auth-library-python/issues/701)) ([09e0389](https://www.github.com/googleapis/google-auth-library-python/commit/09e0389db72cc9d6c5dde34864cb54d717dc0b92))

##### [1.30.1](https://www.github.com/googleapis/google-auth-library-python/compare/v1.30.0...v1.30.1) (2021-05-20)

##### Bug Fixes

- allow user to customize context aware metadata path in \_mtls_helper ([#754](https://www.github.com/googleapis/google-auth-library-python/issues/754)) ([e697687](https://www.github.com/googleapis/google-auth-library-python/commit/e6976879b392508c022610ab3ea2ea55c7089c63))
- fix function name in signing error message ([#751](https://www.github.com/googleapis/google-auth-library-python/issues/751)) ([e9ca25f](https://www.github.com/googleapis/google-auth-library-python/commit/e9ca25fa39a112cc1a376388ab47a4e1b3ea746c))

### [`v1.30.2`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#1302-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1301v1302-2021-06-03)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.30.1...v1.30.2)

</details>
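A quick way to confirm which of these releases is actually installed is the `google.auth.__version__` attribute that the 1.28.0 notes earlier in this series introduced; a trivial check:

```python
# Trivial sanity check: google.auth has exposed __version__ since 1.28.0
# (per the changelog excerpts in this PR series).
import google.auth

print(google.auth.__version__)  # e.g. "1.31.0" after this bump
```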
---
 language/snippets/api/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt
index f33cfcc18db1..4a7d27a06129 100644
--- a/language/snippets/api/requirements.txt
+++ b/language/snippets/api/requirements.txt
@@ -1,3 +1,3 @@
 google-api-python-client==2.6.0
-google-auth==1.30.1
+google-auth==1.31.0
 google-auth-httplib2==0.1.0

From b3044a8b4b2bf7f7ce12f65b90b07e2a791063de Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Fri, 11 Jun 2021 14:42:02 +0200
Subject: [PATCH 215/323] chore(deps): update dependency google-api-python-client to v2.8.0 (#120)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

[![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com)

This PR contains the following updates:

| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
| [google-api-python-client](https://togithub.com/googleapis/google-api-python-client) | `==2.6.0` -> `==2.8.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.8.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.8.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.8.0/compatibility-slim/2.6.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.8.0/confidence-slim/2.6.0)](https://docs.renovatebot.com/merge-confidence/) |

---

### Release Notes

<details>
<summary>googleapis/google-api-python-client</summary>

### [`v2.8.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#280-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev270v280-2021-06-08)

[Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.7.0...v2.8.0)

##### Features

- **apigee:** update the api https://github.com/googleapis/google-api-python-client/commit/e1ea8735612457f6f8b85226887babd904958b25 ([cb945f3](https://www.github.com/googleapis/google-api-python-client/commit/cb945f37130d2950801e02512761f061cef0b54e))
- **bigquery:** update the api https://github.com/googleapis/google-api-python-client/commit/73965daab29cd6ae78004ede62f8c6c80f5587a3 ([31fbcc0](https://www.github.com/googleapis/google-api-python-client/commit/31fbcc014f8642fb0cde7de47889b55a2eaf3f71))
- **compute:** update the api https://github.com/googleapis/google-api-python-client/commit/b8ce2754752f8157b84091a99594f9a45a8f8eed ([8759538](https://www.github.com/googleapis/google-api-python-client/commit/8759538c0ab491be1678db74a3ad538957610d70))
- **container:** update the api https://github.com/googleapis/google-api-python-client/commit/a73f41e49d7ab6258bd722b4ee6d022c195975c2 ([8759538](https://www.github.com/googleapis/google-api-python-client/commit/8759538c0ab491be1678db74a3ad538957610d70))
- **content:** update the api https://github.com/googleapis/google-api-python-client/commit/097e3329e1e5de3ae416cdabc9a73e2fa63a09e9 ([cb945f3](https://www.github.com/googleapis/google-api-python-client/commit/cb945f37130d2950801e02512761f061cef0b54e))
- **dataproc:** update the api https://github.com/googleapis/google-api-python-client/commit/be0dde6ee43f4ff05396d33b16e0af2a1fabfc28 ([8759538](https://www.github.com/googleapis/google-api-python-client/commit/8759538c0ab491be1678db74a3ad538957610d70))
- **dialogflow:** update the api https://github.com/googleapis/google-api-python-client/commit/f7b0ebc0047427b3633480999ed28e0f37fa77f1 ([50e1b7a](https://www.github.com/googleapis/google-api-python-client/commit/50e1b7a1b5c337926c5d2b2f648f057d67431cd6))
- **displayvideo:** update the api https://github.com/googleapis/google-api-python-client/commit/f6b1a8e2d291c2ac9d2ea590101bb3c8c6fbe6cf ([eb505db](https://www.github.com/googleapis/google-api-python-client/commit/eb505dbed724dbd07b151d06fd1b45037dc7e75f))
- **documentai:** update the api https://github.com/googleapis/google-api-python-client/commit/72f3faea1be17c074dc566b33707dad37c9ba16b ([cb945f3](https://www.github.com/googleapis/google-api-python-client/commit/cb945f37130d2950801e02512761f061cef0b54e))
- **lifesciences:** update the api https://github.com/googleapis/google-api-python-client/commit/c524c0a316e4206c8b0e0075e3ed5eceb7e60016 ([8759538](https://www.github.com/googleapis/google-api-python-client/commit/8759538c0ab491be1678db74a3ad538957610d70))
- **metastore:** update the api https://github.com/googleapis/google-api-python-client/commit/54639a05ea77c1a067ed1e3b5df46b2c029c47ea ([4d1153d](https://www.github.com/googleapis/google-api-python-client/commit/4d1153db18a3edf86c5bb83149b4f1c0ba95f810))
- **metastore:** update the api https://github.com/googleapis/google-api-python-client/commit/c9632ee831b9c135f3a0c018b3fdfe73d7e698a4 ([7357b05](https://www.github.com/googleapis/google-api-python-client/commit/7357b05a33a3780716b77161f86f247d92d91903))
- **osconfig:** update the api https://github.com/googleapis/google-api-python-client/commit/5dbaaad34dec45eb5f5a9e98710b3ec05b4d5429 ([8759538](https://www.github.com/googleapis/google-api-python-client/commit/8759538c0ab491be1678db74a3ad538957610d70))
- **pagespeedonline:** update the api https://github.com/googleapis/google-api-python-client/commit/47d41c544376b1911261410235b63ffe3e5faa91 ([8759538](https://www.github.com/googleapis/google-api-python-client/commit/8759538c0ab491be1678db74a3ad538957610d70))
- **privateca:** update the api https://github.com/googleapis/google-api-python-client/commit/8f7ad0d176d61f9e9a409d7fe35b20c5f1c239a5 ([8759538](https://www.github.com/googleapis/google-api-python-client/commit/8759538c0ab491be1678db74a3ad538957610d70))
- **realtimebidding:** update the api https://github.com/googleapis/google-api-python-client/commit/34d5d2606070b0c6fef053d6b88a65be085227b5 ([31fbcc0](https://www.github.com/googleapis/google-api-python-client/commit/31fbcc014f8642fb0cde7de47889b55a2eaf3f71))
- **sasportal:** update the api https://github.com/googleapis/google-api-python-client/commit/ca30eddc3d583c1851cc2f70f37c1d9f81f4342f ([50e1b7a](https://www.github.com/googleapis/google-api-python-client/commit/50e1b7a1b5c337926c5d2b2f648f057d67431cd6))
- **servicemanagement:** update the api https://github.com/googleapis/google-api-python-client/commit/491bafaefd792deae68c24337ebd7011faeb723b ([cb945f3](https://www.github.com/googleapis/google-api-python-client/commit/cb945f37130d2950801e02512761f061cef0b54e))
- **youtube:** update the api https://github.com/googleapis/google-api-python-client/commit/981cfb0ae51df0d2f48152bb74f79840ca19727a ([50e1b7a](https://www.github.com/googleapis/google-api-python-client/commit/50e1b7a1b5c337926c5d2b2f648f057d67431cd6))

### [`v2.7.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#270-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev260v270-2021-06-01)

[Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.6.0...v2.7.0)

##### Features

- **adexchangebuyer:** update the api https://github.com/googleapis/google-api-python-client/commit/3cf7a8dceb567f3c89c307f3496c381af91b0fc6 ([ab1d6dc](https://www.github.com/googleapis/google-api-python-client/commit/ab1d6dc365fc482d482de197da7f7583afd04bd0))
- **admin:** update the api https://github.com/googleapis/google-api-python-client/commit/7bac81fc588ccbe7b5e6c75af52b719e73efd118 ([ab1d6dc](https://www.github.com/googleapis/google-api-python-client/commit/ab1d6dc365fc482d482de197da7f7583afd04bd0))
- **androidmanagement:** update the api https://github.com/googleapis/google-api-python-client/commit/877990251a43acbc447a1f2f963beb3bbfc6352f ([bdce941](https://www.github.com/googleapis/google-api-python-client/commit/bdce9419ca05d20e0eecd817f404f292a56ce79c))
- **apigee:** update the api https://github.com/googleapis/google-api-python-client/commit/37f31420ffc3adb1bdd23d7fc91f80701522aac8 ([4c9ccb0](https://www.github.com/googleapis/google-api-python-client/commit/4c9ccb08aa866b5402c5e63c70306b5a3c121ba1))
- **bigquery:** update the api https://github.com/googleapis/google-api-python-client/commit/086d714317a73331fcfdf4027496c3b36354955f ([508c39f](https://www.github.com/googleapis/google-api-python-client/commit/508c39fa665c901d9d754aa31dc9d1af45469ec4))
- **container:** update the api https://github.com/googleapis/google-api-python-client/commit/514acdbf2c7eeaf6b1b9773c63b180131418ff57 ([4c9ccb0](https://www.github.com/googleapis/google-api-python-client/commit/4c9ccb08aa866b5402c5e63c70306b5a3c121ba1))
- **content:** update the api https://github.com/googleapis/google-api-python-client/commit/aab557d6c59a5c414d0ac0bc6349763523c9816f ([ab1d6dc](https://www.github.com/googleapis/google-api-python-client/commit/ab1d6dc365fc482d482de197da7f7583afd04bd0))
- **content:** update the api https://github.com/googleapis/google-api-python-client/commit/eaf742d4e933744abc72c1808f1e5a16dccaa1d4 ([bdce941](https://www.github.com/googleapis/google-api-python-client/commit/bdce9419ca05d20e0eecd817f404f292a56ce79c))
- **dataflow:** update the api https://github.com/googleapis/google-api-python-client/commit/d979251cc4f8f537a875841cc0f6d86bbe0f195b ([38664e8](https://www.github.com/googleapis/google-api-python-client/commit/38664e8dec117413b8d27fc7230eb9c351d2c0de))
- **dfareporting:** update the api https://github.com/googleapis/google-api-python-client/commit/c83912bec60626d3388fbe749d7a395fa3bc6c22 ([ab1d6dc](https://www.github.com/googleapis/google-api-python-client/commit/ab1d6dc365fc482d482de197da7f7583afd04bd0))
- **dlp:** update the api https://github.com/googleapis/google-api-python-client/commit/7e3d1c4ab85d50307d42af3048f9a7dd47a2b9eb ([4c9ccb0](https://www.github.com/googleapis/google-api-python-client/commit/4c9ccb08aa866b5402c5e63c70306b5a3c121ba1))
- **documentai:** update the api https://github.com/googleapis/google-api-python-client/commit/222030d8c1583f49657862a308b5eae41311d7e7 ([4c9ccb0](https://www.github.com/googleapis/google-api-python-client/commit/4c9ccb08aa866b5402c5e63c70306b5a3c121ba1))
- **doubleclickbidmanager:** update the api https://github.com/googleapis/google-api-python-client/commit/895ff465e58dffd1f6e29dffd673418c76007e1b ([ab1d6dc](https://www.github.com/googleapis/google-api-python-client/commit/ab1d6dc365fc482d482de197da7f7583afd04bd0))
- **firebase:** update the api https://github.com/googleapis/google-api-python-client/commit/6bd0412a11a1a55770415fdc76100b3c76a83a94 ([4c9ccb0](https://www.github.com/googleapis/google-api-python-client/commit/4c9ccb08aa866b5402c5e63c70306b5a3c121ba1))
- **ondemandscanning:** update the api https://github.com/googleapis/google-api-python-client/commit/b77d12d24d17264123231dd86699fceada262440 ([4c9ccb0](https://www.github.com/googleapis/google-api-python-client/commit/4c9ccb08aa866b5402c5e63c70306b5a3c121ba1))
- **osconfig:** update the api https://github.com/googleapis/google-api-python-client/commit/c541143744c4b077d0a044455a35d0de227a0bf6 ([4c9ccb0](https://www.github.com/googleapis/google-api-python-client/commit/4c9ccb08aa866b5402c5e63c70306b5a3c121ba1))
- **prod_tt_sasportal:** update the api https://github.com/googleapis/google-api-python-client/commit/1e0f4a6e5e0bfde1ba4c06223d7fb02f63756690 ([4c9ccb0](https://www.github.com/googleapis/google-api-python-client/commit/4c9ccb08aa866b5402c5e63c70306b5a3c121ba1))
- **redis:** update the api https://github.com/googleapis/google-api-python-client/commit/4350b35f065e8d651839ebcc047cfaec787b4f98 ([38664e8](https://www.github.com/googleapis/google-api-python-client/commit/38664e8dec117413b8d27fc7230eb9c351d2c0de))
- **serviceconsumermanagement:** update the api https://github.com/googleapis/google-api-python-client/commit/e2046363f037151e02020ea178651b814c11761a ([4c9ccb0](https://www.github.com/googleapis/google-api-python-client/commit/4c9ccb08aa866b5402c5e63c70306b5a3c121ba1))
- **servicecontrol:** update the api https://github.com/googleapis/google-api-python-client/commit/facd7ecc18c129cf8010d19d3969e8d5b4598dfc ([ab1d6dc](https://www.github.com/googleapis/google-api-python-client/commit/ab1d6dc365fc482d482de197da7f7583afd04bd0))
- **serviceusage:** update the api https://github.com/googleapis/google-api-python-client/commit/b79b21e71246ab6935214ca751125c83b1990167 ([4c9ccb0](https://www.github.com/googleapis/google-api-python-client/commit/4c9ccb08aa866b5402c5e63c70306b5a3c121ba1))
- **sqladmin:** update the api https://github.com/googleapis/google-api-python-client/commit/f2bb5e677634a0866836353bc40b26d40b1d044b ([a940762](https://www.github.com/googleapis/google-api-python-client/commit/a9407624e954e34bfd989f64ed0f5be74c40d4c5))

##### Bug Fixes

- resolve issue where certain artifacts would not be updated ([#1385](https://www.github.com/googleapis/google-api-python-client/issues/1385)) ([31bbe51](https://www.github.com/googleapis/google-api-python-client/commit/31bbe51739f966491f1be8ab67c500c65c049daf))

</details>
--- ### Configuration 📅 **Schedule**: At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box. --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 4a7d27a06129..a7cacabf692a 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.6.0 +google-api-python-client==2.8.0 google-auth==1.31.0 google-auth-httplib2==0.1.0 From bd356e22cf7f20a70e3a49184e7bd7ff004ba4f6 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Fri, 11 Jun 2021 12:10:07 -0400 Subject: [PATCH 216/323] docs: fix typos (#125) --- language/v1/language_syntax_gcs.py | 2 +- language/v1/language_syntax_text.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/language/v1/language_syntax_gcs.py b/language/v1/language_syntax_gcs.py index 4e8a5cc45bfe..32c64edefa6d 100644 --- a/language/v1/language_syntax_gcs.py +++ b/language/v1/language_syntax_gcs.py @@ -62,7 +62,7 @@ def sample_analyze_syntax(gcs_content_uri): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of spech are as defined in: + # Part of speech is defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. diff --git a/language/v1/language_syntax_text.py b/language/v1/language_syntax_text.py index c3eb9383cf6f..132c577922bf 100644 --- a/language/v1/language_syntax_text.py +++ b/language/v1/language_syntax_text.py @@ -61,7 +61,7 @@ def sample_analyze_syntax(text_content): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of spech are as defined in: + # Part of speech is defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. 
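The comment corrected above sits in the part-of-speech loop of the syntax samples. For orientation, here is a minimal sketch of that lookup against the 2.x client library the snippets now pin; the sample sentence and the `request`-dict call form are illustrative assumptions, not the repo's sample verbatim:

```python
# Sketch only: part-of-speech tags as surfaced by analyze_syntax,
# assuming google-cloud-language 2.x.
from google.cloud import language_v1

client = language_v1.LanguageServiceClient()
document = {
    "content": "The quick brown fox jumped over the lazy dog.",
    "type_": language_v1.Document.Type.PLAIN_TEXT,
}
response = client.analyze_syntax(request={"document": document})
for token in response.tokens:
    # Tags follow the tagset in the LREC 2012 paper cited by the sample
    # comments (NOUN, VERB, ADJ, ...).
    tag = language_v1.PartOfSpeech.Tag(token.part_of_speech.tag)
    print(f"{token.text.content}: {tag.name}")
```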
From fb564e7709935945208cdcd168455924f94405a3 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 16 Jun 2021 14:52:02 +0200 Subject: [PATCH 217/323] chore(deps): update dependency google-api-python-client to v2.9.0 (#128) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [google-api-python-client](https://togithub.com/googleapis/google-api-python-client) | `==2.8.0` -> `==2.9.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.9.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.9.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.9.0/compatibility-slim/2.8.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.9.0/confidence-slim/2.8.0)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
googleapis/google-api-python-client ### [`v2.9.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#​290-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev280v290-2021-06-12) [Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.8.0...v2.9.0) ##### Features - **analyticsadmin:** update the api https://github.com/googleapis/google-api-python-client/commit/3ed78879365ebef411b2748be8b5d52c047210eb ([33237a8](https://www.github.com/googleapis/google-api-python-client/commit/33237a8250e3becfaa1e4b5f67ef0c887cfc44a9)) - **analyticsadmin:** update the api https://github.com/googleapis/google-api-python-client/commit/a715d2b2c5d5535f9317c5b3922350de2bfb883a ([0f0918f](https://www.github.com/googleapis/google-api-python-client/commit/0f0918f92a699753b52c77dd236ad84ee00a32a7)) - **apigee:** update the api https://github.com/googleapis/google-api-python-client/commit/9fcf80b4e92dca6ebc251781c69764e42aa186b3 ([0f0918f](https://www.github.com/googleapis/google-api-python-client/commit/0f0918f92a699753b52c77dd236ad84ee00a32a7)) - **appengine:** update the api https://github.com/googleapis/google-api-python-client/commit/ffcf86035a751e98a763c8a2d54b70d3a55ca14d ([26aa9e2](https://www.github.com/googleapis/google-api-python-client/commit/26aa9e282e30ca9c8797ee5346cbe9c0b9ca65a7)) - **chat:** update the api https://github.com/googleapis/google-api-python-client/commit/47ff8a5cac1b7dbd95c6f2b970a74629f700d4fc ([0f0918f](https://www.github.com/googleapis/google-api-python-client/commit/0f0918f92a699753b52c77dd236ad84ee00a32a7)) - **composer:** update the api https://github.com/googleapis/google-api-python-client/commit/4862529435851dbb106efa0311c2b7515d2ad2ea ([33237a8](https://www.github.com/googleapis/google-api-python-client/commit/33237a8250e3becfaa1e4b5f67ef0c887cfc44a9)) - **containeranalysis:** update the api https://github.com/googleapis/google-api-python-client/commit/9a1c70b7df3e074fc9fbd0eebdaf75a91046078c ([26aa9e2](https://www.github.com/googleapis/google-api-python-client/commit/26aa9e282e30ca9c8797ee5346cbe9c0b9ca65a7)) - **documentai:** update the api https://github.com/googleapis/google-api-python-client/commit/07a6e774ac185442a99437896eaee774946b5846 ([26aa9e2](https://www.github.com/googleapis/google-api-python-client/commit/26aa9e282e30ca9c8797ee5346cbe9c0b9ca65a7)) - **drive:** update the api https://github.com/googleapis/google-api-python-client/commit/773910fdf25b084aa3623d24fe99c8a1330fbecb ([26aa9e2](https://www.github.com/googleapis/google-api-python-client/commit/26aa9e282e30ca9c8797ee5346cbe9c0b9ca65a7)) - **genomics:** update the api https://github.com/googleapis/google-api-python-client/commit/8a1c8a67e7e5b76581cfa95ffa14c01019c305af ([33237a8](https://www.github.com/googleapis/google-api-python-client/commit/33237a8250e3becfaa1e4b5f67ef0c887cfc44a9)) - **gkehub:** update the api https://github.com/googleapis/google-api-python-client/commit/0fd49e0d39455077e39d850ac464635034d253b8 ([33237a8](https://www.github.com/googleapis/google-api-python-client/commit/33237a8250e3becfaa1e4b5f67ef0c887cfc44a9)) - **managedidentities:** update the api https://github.com/googleapis/google-api-python-client/commit/0927c1989574ae4272e4f753f4d55c88af62d8f2 ([c3f8675](https://www.github.com/googleapis/google-api-python-client/commit/c3f86757bccb6b42552f87d37a645651c58d6c7a)) - **managedidentities:** update the api 
https://github.com/googleapis/google-api-python-client/commit/e96adbb1ba3e4e56d916cc28474f85543f17ad0e ([26aa9e2](https://www.github.com/googleapis/google-api-python-client/commit/26aa9e282e30ca9c8797ee5346cbe9c0b9ca65a7)) - **spanner:** update the api https://github.com/googleapis/google-api-python-client/commit/87da2f3605ec1b8986324cddc33f2b5601d3e896 ([26aa9e2](https://www.github.com/googleapis/google-api-python-client/commit/26aa9e282e30ca9c8797ee5346cbe9c0b9ca65a7)) ##### Bug Fixes - update content-length header for next page ([#​1404](https://www.github.com/googleapis/google-api-python-client/issues/1404)) ([8019f2f](https://www.github.com/googleapis/google-api-python-client/commit/8019f2f96abc6a4375873becb2f17b399f738654)), closes [#​1403](https://www.github.com/googleapis/google-api-python-client/issues/1403)
--- ### Configuration 📅 **Schedule**: At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box. --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index a7cacabf692a..46527bb026d8 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.8.0 +google-api-python-client==2.9.0 google-auth==1.31.0 google-auth-httplib2==0.1.0 From 940c69b9e8f71d917dc589cb9e763b1e10934409 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sun, 20 Jun 2021 02:58:02 +0200 Subject: [PATCH 218/323] chore(deps): update dependency google-cloud-language to v2.1.0 (#130) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [google-cloud-language](https://togithub.com/googleapis/python-language) | `==2.0.0` -> `==2.1.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-cloud-language/2.1.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-cloud-language/2.1.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-cloud-language/2.1.0/compatibility-slim/2.0.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-cloud-language/2.1.0/confidence-slim/2.0.0)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
**googleapis/python-language**

### [`v2.1.0`](https://togithub.com/googleapis/python-language/blob/master/CHANGELOG.md#210-httpswwwgithubcomgoogleapispython-languagecomparev140v210-2021-06-16)

[Compare Source](https://togithub.com/googleapis/python-language/compare/v2.0.0...v2.1.0)

##### Features

- add 'from_service_account_info' factory to clients ([cc8a180](https://www.github.com/googleapis/python-language/commit/cc8a18032af7c8d8bf45130898eeae7efb17a91e))
- add common resource helper methods; expose client transport ([#55](https://www.github.com/googleapis/python-language/issues/55)) ([8dde55c](https://www.github.com/googleapis/python-language/commit/8dde55cdd0e956c333039c0b74e49a06dd6ad33b))
- add from_service_account_info factory and fix sphinx identifiers ([#66](https://www.github.com/googleapis/python-language/issues/66)) ([cc8a180](https://www.github.com/googleapis/python-language/commit/cc8a18032af7c8d8bf45130898eeae7efb17a91e))
- support self-signed JWT flow for service accounts ([0dcb15e](https://www.github.com/googleapis/python-language/commit/0dcb15eb46b60bd816a6919464be1331c2c8de41))

##### Bug Fixes

- add async client to %name\_%version/init.py ([0dcb15e](https://www.github.com/googleapis/python-language/commit/0dcb15eb46b60bd816a6919464be1331c2c8de41))
- adds underscore to "type" to NL API samples ([#49](https://www.github.com/googleapis/python-language/issues/49)) ([36aa320](https://www.github.com/googleapis/python-language/commit/36aa320bf3e0018d66a7d0c91ce4733f20e9acc0))
- **deps:** add packaging requirement ([#113](https://www.github.com/googleapis/python-language/issues/113)) ([7e711ac](https://www.github.com/googleapis/python-language/commit/7e711ac63c95c1018d24c7c4db3bc02c191efcfc))
- fix sphinx identifiers ([cc8a180](https://www.github.com/googleapis/python-language/commit/cc8a18032af7c8d8bf45130898eeae7efb17a91e))
- remove client recv msg limit fix: add enums to `types/__init__.py` ([#62](https://www.github.com/googleapis/python-language/issues/62)) ([3476c0f](https://www.github.com/googleapis/python-language/commit/3476c0f72529cbcbe61ea5c7e6a22291777bed7e))
- use correct retry deadlines ([#83](https://www.github.com/googleapis/python-language/issues/83)) ([e2be2d8](https://www.github.com/googleapis/python-language/commit/e2be2d8ecf849940f2ea066655fda3bee68d8a74))

##### Documentation

- fix typos ([#125](https://www.github.com/googleapis/python-language/issues/125)) ([788176f](https://www.github.com/googleapis/python-language/commit/788176feff5fb541e0d16f236b10b765d04ecb98))

##### Miscellaneous Chores

- release as 2.1.0 ([#126](https://www.github.com/googleapis/python-language/issues/126)) ([92fa7f9](https://www.github.com/googleapis/python-language/commit/92fa7f995013c302f3bd3eb6bec53d92d8d9990c))
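Two of the entries above surface directly at call sites: the `from_service_account_info` factory and the `type_` underscore fix from #49. A hedged sketch of both, assuming a parsed service-account key is at hand; the file name and text content are placeholders:

```python
# Sketch only: not working credentials, just the call shapes.
import json

from google.cloud import language_v1

with open("service-account.json") as fp:  # hypothetical key file
    sa_info = json.load(fp)

# Added in this release line: build a client straight from the parsed key info.
client = language_v1.LanguageServiceClient.from_service_account_info(sa_info)

# Per the #49 fix: the 2.x protos expose the Document field as `type_`,
# so plain dicts must spell it with the trailing underscore.
document = {"content": "Hello, world!", "type_": language_v1.Document.Type.PLAIN_TEXT}
sentiment = client.analyze_sentiment(request={"document": document}).document_sentiment
print(sentiment.score, sentiment.magnitude)
```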
--- ### Configuration 📅 **Schedule**: At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box. --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 328dc7a517b7..5c6d9db7a9e1 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.0.0 +google-cloud-language==2.1.0 numpy==1.20.1; python_version > '3.6' numpy==1.19.5; python_version <= '3.6' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 83a8cba4735e..d79946ba3a7c 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.0.0 +google-cloud-language==2.1.0 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index 83a8cba4735e..d79946ba3a7c 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.0.0 +google-cloud-language==2.1.0 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 83a8cba4735e..d79946ba3a7c 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.0.0 +google-cloud-language==2.1.0 From 8624ffcfd9cb277830a940dde289c56f7d018d94 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 26 Jun 2021 13:42:10 +0200 Subject: [PATCH 219/323] chore(deps): update dependency google-api-python-client to v2.10.0 (#139) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [google-api-python-client](https://togithub.com/googleapis/google-api-python-client) | `==2.9.0` -> `==2.10.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.10.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.10.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.10.0/compatibility-slim/2.9.0)](https://docs.renovatebot.com/merge-confidence/) | 
[![confidence](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.10.0/confidence-slim/2.9.0)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
googleapis/google-api-python-client ### [`v2.10.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#​2100-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev290v2100-2021-06-22) [Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.9.0...v2.10.0) ##### Features - **analyticsadmin:** update the api https://github.com/googleapis/google-api-python-client/commit/5a2e42e9a4631216c4883d5538c970a5faad59eb ([b1eafb3](https://www.github.com/googleapis/google-api-python-client/commit/b1eafb327669474d202bb2a430ed9e9561102db3)) - **androidmanagement:** update the api https://github.com/googleapis/google-api-python-client/commit/5fcc274bcd4a9a71a0a568e2771c443a2b2b20b0 ([b1eafb3](https://www.github.com/googleapis/google-api-python-client/commit/b1eafb327669474d202bb2a430ed9e9561102db3)) - **bigqueryreservation:** update the api https://github.com/googleapis/google-api-python-client/commit/63c00f6819408b943c2a7cc4bd2185828be173c6 ([3659137](https://www.github.com/googleapis/google-api-python-client/commit/365913780592552488cc5792d26b3f22b9e9ed1b)) - **dialogflow:** update the api https://github.com/googleapis/google-api-python-client/commit/512fc42343fa946889ec155456a05f0d64969903 ([b1eafb3](https://www.github.com/googleapis/google-api-python-client/commit/b1eafb327669474d202bb2a430ed9e9561102db3)) - **firebaserules:** update the api https://github.com/googleapis/google-api-python-client/commit/7b2000437a01ecd25e4ba571049f62c5b6dc9d63 ([3659137](https://www.github.com/googleapis/google-api-python-client/commit/365913780592552488cc5792d26b3f22b9e9ed1b)) - **iap:** update the api https://github.com/googleapis/google-api-python-client/commit/18550fd0501057584ef6d2fa329f09b75dad97d8 ([3659137](https://www.github.com/googleapis/google-api-python-client/commit/365913780592552488cc5792d26b3f22b9e9ed1b)) - **keep:** update the api https://github.com/googleapis/google-api-python-client/commit/45eb6dac450c1055a6ced84332529b70b0a8c831 ([b1eafb3](https://www.github.com/googleapis/google-api-python-client/commit/b1eafb327669474d202bb2a430ed9e9561102db3)) - **managedidentities:** update the api https://github.com/googleapis/google-api-python-client/commit/d2220014e787c2a2c90808cfd1e49a25cd783e72 ([3659137](https://www.github.com/googleapis/google-api-python-client/commit/365913780592552488cc5792d26b3f22b9e9ed1b)) ##### Bug Fixes - **smartdevicemanagement:** update the api https://github.com/googleapis/google-api-python-client/commit/772982044da691f9116073855e692f7793edacce ([b1eafb3](https://www.github.com/googleapis/google-api-python-client/commit/b1eafb327669474d202bb2a430ed9e9561102db3))
--- ### Configuration 📅 **Schedule**: At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box. --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 46527bb026d8..9d43b1ba7be1 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.9.0 +google-api-python-client==2.10.0 google-auth==1.31.0 google-auth-httplib2==0.1.0 From cd0bd70bd1281b76d4411fd2c7cb25d182945d0c Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 26 Jun 2021 13:52:23 +0200 Subject: [PATCH 220/323] chore(deps): update dependency google-auth to v1.32.0 (#137) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | `==1.31.0` -> `==1.32.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-auth/1.32.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-auth/1.32.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-auth/1.32.0/compatibility-slim/1.31.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-auth/1.32.0/confidence-slim/1.31.0)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
**googleapis/google-auth-library-python**

### [`v1.32.0`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#1320-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1310v1320-2021-06-16)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.31.0...v1.32.0)

##### Features

- allow scopes for self signed jwt ([#776](https://www.github.com/googleapis/google-auth-library-python/issues/776)) ([2cfe655](https://www.github.com/googleapis/google-auth-library-python/commit/2cfe655bba837170abc07701557a1a5e0fe3294e))
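The single feature here concerns scoped credentials that skip the OAuth token exchange when a client opts into self-signed JWTs. A minimal sketch of passing scopes, assuming a local key file; the path and scope URL are placeholders:

```python
# Sketch only: scoped service-account credentials. With google-auth >= 1.32.0
# these scopes can ride inside a self-signed JWT when the calling client
# library enables that flow, avoiding a round trip to the token endpoint.
from google.oauth2 import service_account

credentials = service_account.Credentials.from_service_account_file(
    "service-account.json",  # hypothetical path
    scopes=["https://www.googleapis.com/auth/cloud-platform"],
)

# Hand the credentials to any Google client, e.g. the Language client
# used throughout these snippets.
from google.cloud import language_v1

client = language_v1.LanguageServiceClient(credentials=credentials)
```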
--- ### Configuration 📅 **Schedule**: At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box. --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 9d43b1ba7be1..e9032aab904f 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.10.0 -google-auth==1.31.0 +google-auth==1.32.0 google-auth-httplib2==0.1.0 From d8016d94ebe2b99bf2bf6d906ee88b414fb4ef9f Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 2 Jul 2021 20:15:20 +0200 Subject: [PATCH 221/323] chore(deps): update dependency google-cloud-language to v2.2.0 (#146) --- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 5c6d9db7a9e1..0020383cbb9a 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.1.0 +google-cloud-language==2.2.0 numpy==1.20.1; python_version > '3.6' numpy==1.19.5; python_version <= '3.6' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index d79946ba3a7c..62e406fc91cd 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.1.0 +google-cloud-language==2.2.0 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index d79946ba3a7c..62e406fc91cd 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.1.0 +google-cloud-language==2.2.0 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index d79946ba3a7c..62e406fc91cd 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.1.0 +google-cloud-language==2.2.0 From 8f9714aa8df58f06a202571b2a2e74ecb1cd63f2 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 3 Jul 2021 13:40:23 +0200 Subject: [PATCH 222/323] chore(deps): update dependency google-api-python-client to v2.11.0 (#144) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | 
[google-api-python-client](https://togithub.com/googleapis/google-api-python-client) | `==2.10.0` -> `==2.11.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.11.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.11.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.11.0/compatibility-slim/2.10.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.11.0/confidence-slim/2.10.0)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
googleapis/google-api-python-client ### [`v2.11.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#​2110-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev2100v2110-2021-06-29) [Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.10.0...v2.11.0) ##### Features - **admin:** update the api https://github.com/googleapis/google-api-python-client/commit/1534f8926019f43dc87a29c1ca32191884556e3b ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **alertcenter:** update the api https://github.com/googleapis/google-api-python-client/commit/7a488d3f0deef3e1f106cff63b1e4f66ad1727bb ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **analyticsadmin:** update the api https://github.com/googleapis/google-api-python-client/commit/934358e5c041ffd1449e7c744463e61e94381ed5 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **analyticsdata:** update the api https://github.com/googleapis/google-api-python-client/commit/40f712130674cec09c1dd7560f69a330a335b226 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **androiddeviceprovisioning:** update the api https://github.com/googleapis/google-api-python-client/commit/81a0002a7051aeab647a3296fb18ce7973bf7137 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **apigee:** update the api https://github.com/googleapis/google-api-python-client/commit/2e6c78a93b2c0ee7001eb163ec95f9afc8f35575 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **appengine:** update the api https://github.com/googleapis/google-api-python-client/commit/125f74a61a94af17c01930841a79db46d3a059c5 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **bigquery:** update the api https://github.com/googleapis/google-api-python-client/commit/59c51e319602741632201d2ce61a6b03f13e4003 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **cloudasset:** update the api https://github.com/googleapis/google-api-python-client/commit/e615264971ccee6eb9b450fe3d85614209c0fee8 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **cloudbuild:** update the api https://github.com/googleapis/google-api-python-client/commit/ceddaccf23eb8b809688907cfdef8906cd77d65d ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **cloudidentity:** update the api https://github.com/googleapis/google-api-python-client/commit/22cd08b69b034c2cdfd854e1ac784f834539db3a ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **container:** update the api https://github.com/googleapis/google-api-python-client/commit/f494c63a42dc418559292c6269289317d9cebc23 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **documentai:** update the api 
https://github.com/googleapis/google-api-python-client/commit/e8aaabbc7670aefc4a745916fccb31424745f748 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **drive:** update the api https://github.com/googleapis/google-api-python-client/commit/72cab88ce591d906ea1cfcbe4dee354cccb623f2 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **file:** update the api https://github.com/googleapis/google-api-python-client/commit/0cd409a2d15c68aca3ea864400fc4772b9b4e503 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **firebaseappcheck:** update the api https://github.com/googleapis/google-api-python-client/commit/9a0131b2326327109d1ba7af97b1f4808dd7a898 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **healthcare:** update the api https://github.com/googleapis/google-api-python-client/commit/45ee6b28b86a43f44c707e15a7e06fdf8fce6a0f ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **ideahub:** update the api https://github.com/googleapis/google-api-python-client/commit/73b86d9d37f33aeaed74772d0319ba1350e54ed5 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **managedidentities:** update the api https://github.com/googleapis/google-api-python-client/commit/a07ed4558c93cb8f7fae49c7b353f46ccfea6c10 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **memcache:** update the api https://github.com/googleapis/google-api-python-client/commit/665ce5b47b9b3238dcfa201b9343bf6447df5994 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **metastore:** update the api https://github.com/googleapis/google-api-python-client/commit/9fd5ffbf37fb052323f5fa68d307c68391c519ac ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **ml:** update the api https://github.com/googleapis/google-api-python-client/commit/cf54d564915a558569c093287b448a7819e215f6 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **monitoring:** update the api https://github.com/googleapis/google-api-python-client/commit/d1ffbfc041f23f904cd8bc35a450871b2909473b ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **networkconnectivity:** update the api https://github.com/googleapis/google-api-python-client/commit/2cc462638aec61f4e775bfce883e725b104eeabb ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **notebooks:** update the api https://github.com/googleapis/google-api-python-client/commit/831ba938855aa4bdefafedf63e01af43350e7ed2 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **ondemandscanning:** update the api https://github.com/googleapis/google-api-python-client/commit/c04b4023477393cbb41984b14e0c734fc8587d45 
([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **paymentsresellersubscription:** update the api https://github.com/googleapis/google-api-python-client/commit/2cd5b1c2ef524f3ab00630508710cce7bee53574 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **prod_tt_sasportal:** update the api https://github.com/googleapis/google-api-python-client/commit/8b6bd24e57a79f470c750ad04052f79a3cafe0fa ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **realtimebidding:** update the api https://github.com/googleapis/google-api-python-client/commit/fd514dc8d86182dc17698f3293144928535f709c ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **reseller:** update the api https://github.com/googleapis/google-api-python-client/commit/20226c4401956732772e2a563c7920666135e605 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **sasportal:** update the api https://github.com/googleapis/google-api-python-client/commit/38d5156350b79a9933b2806f4bbe443043a33185 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **sts:** update the api https://github.com/googleapis/google-api-python-client/commit/190e13ebe5a4660d8825d3a8708559077a342bdf ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **transcoder:** update the api https://github.com/googleapis/google-api-python-client/commit/fbcacce6a17c1cae45b22f4a2058e730ec84b55a ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127)) - **youtube:** update the api https://github.com/googleapis/google-api-python-client/commit/5046950872559fe93b954dc9a4f71fd724176247 ([04bafe1](https://www.github.com/googleapis/google-api-python-client/commit/04bafe14efe7a6d1a7da03de89312062a4afa127))
--- ### Configuration 📅 **Schedule**: At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box. --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index e9032aab904f..411e17cd63ac 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.10.0 +google-api-python-client==2.11.0 google-auth==1.32.0 google-auth-httplib2==0.1.0 From e0d80afa82546b37d4a8b69bf6718fd2a3949eb8 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 7 Jul 2021 17:22:46 +0200 Subject: [PATCH 223/323] chore(deps): update dependency google-api-python-client to v2.12.0 (#147) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [google-api-python-client](https://togithub.com/googleapis/google-api-python-client) | `==2.11.0` -> `==2.12.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.12.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.12.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.12.0/compatibility-slim/2.11.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.12.0/confidence-slim/2.11.0)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
googleapis/google-api-python-client ### [`v2.12.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#​2120-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev2110v2120-2021-07-06) [Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.11.0...v2.12.0) ##### Features - **artifactregistry:** update the api https://github.com/googleapis/google-api-python-client/commit/bc9a38bf901a63525fb4c7b1e94fd4ce5fb441c3 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **chat:** update the api https://github.com/googleapis/google-api-python-client/commit/eea3c5c177aaded427fd3b5bab80812bf748ef79 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **cloudasset:** update the api https://github.com/googleapis/google-api-python-client/commit/2e31dd0b58d3c656df5aaa042994c637d0100f97 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **cloudbuild:** update the api https://github.com/googleapis/google-api-python-client/commit/3a3b420d53aabe1fdf6ddca483a3d164f72d6268 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **composer:** update the api https://github.com/googleapis/google-api-python-client/commit/78c0d8decbe640c522c45850c97002e7da12f4e0 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **container:** update the api https://github.com/googleapis/google-api-python-client/commit/a54737fe763fd288e54505faace58040cbf8920b ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **datafusion:** update the api https://github.com/googleapis/google-api-python-client/commit/f6bf3c6b92fbf7072798b987998bf55ee9276389 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **dataproc:** update the api https://github.com/googleapis/google-api-python-client/commit/3fde9a3604e4811ce02f1062dcee9cef35b1ad51 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **documentai:** update the api https://github.com/googleapis/google-api-python-client/commit/79c556d389889fb0f48c8cc5ad5ab4a2caaab603 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **groupssettings:** update the api https://github.com/googleapis/google-api-python-client/commit/d537f96a20a699629fa85fbdeadb74ead3b32699 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **logging:** update the api https://github.com/googleapis/google-api-python-client/commit/d3548c505e4b1065365584493d15f21a19639626 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **monitoring:** update the api https://github.com/googleapis/google-api-python-client/commit/d24af68a9621fda9d7a576d3615178604a1482d2 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **paymentsresellersubscription:** update the api 
https://github.com/googleapis/google-api-python-client/commit/cff9039529278d95cee936826b5406867c638430 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **redis:** update the api https://github.com/googleapis/google-api-python-client/commit/46102d1726393f872420820e6200bb83cefd74b6 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **run:** update the api https://github.com/googleapis/google-api-python-client/commit/db18e29c7f616f212121960fe8efd6fb7cdf9b22 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **slides:** update the api https://github.com/googleapis/google-api-python-client/commit/68634cd565914e6003c851ec5f43fa2ff290afca ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **spanner:** update the api https://github.com/googleapis/google-api-python-client/commit/289512124fc77a69957b912f06e9c3d315aa0526 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193)) - **storagetransfer:** update the api https://github.com/googleapis/google-api-python-client/commit/24895f156f10c03f2da686be95d8c70ea34008a3 ([a933dad](https://www.github.com/googleapis/google-api-python-client/commit/a933dad7e72d3093be480ce6af3965f41db1d193))
--- ### Configuration 📅 **Schedule**: At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box. --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 411e17cd63ac..517ba4afeb1b 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.11.0 +google-api-python-client==2.12.0 google-auth==1.32.0 google-auth-httplib2==0.1.0 From 45ee821812bc7b61f3cb5c31260543aab9b0e587 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 12 Jul 2021 22:08:11 +0200 Subject: [PATCH 224/323] chore(deps): update dependency google-auth to v1.32.1 (#145) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | `==1.32.0` -> `==1.32.1` | [![age](https://badges.renovateapi.com/packages/pypi/google-auth/1.32.1/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-auth/1.32.1/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-auth/1.32.1/compatibility-slim/1.32.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-auth/1.32.1/confidence-slim/1.32.0)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
**googleapis/google-auth-library-python**

### [`v1.32.1`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#1321-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1320v1321-2021-06-30)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.32.0...v1.32.1)
--- ### Configuration 📅 **Schedule**: At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Renovate will not automatically rebase this PR, because other commits have been found. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box. --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 517ba4afeb1b..db263f9551c3 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.12.0 -google-auth==1.32.0 +google-auth==1.32.1 google-auth-httplib2==0.1.0 From ab4601dd386c05fb5e908e1677717eaa1d247e8a Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 16 Jul 2021 00:15:17 +0200 Subject: [PATCH 225/323] chore(deps): update dependency google-auth to v1.33.0 (#159) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index db263f9551c3..0b69cc0948ca 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.12.0 -google-auth==1.32.1 +google-auth==1.33.0 google-auth-httplib2==0.1.0 From 58741c313b882ef74abec644c5d6a3b5bb5cc0ff Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 16 Jul 2021 16:06:23 +0200 Subject: [PATCH 226/323] chore(deps): update dependency google-api-python-client to v2.13.0 (#151) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [google-api-python-client](https://togithub.com/googleapis/google-api-python-client) | `==2.12.0` -> `==2.13.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.13.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.13.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.13.0/compatibility-slim/2.12.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.13.0/confidence-slim/2.12.0)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
googleapis/google-api-python-client ### [`v2.13.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#​2130-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev2120v2130-2021-07-13) [Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.12.0...v2.13.0) ##### Features - **analyticsadmin:** update the api https://github.com/googleapis/google-api-python-client/commit/96675a8d9158ec13353fe241f858201fc51b784d ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **composer:** update the api https://github.com/googleapis/google-api-python-client/commit/add2fbdc3afb6696537eb087bc1d79df9194a37a ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **container:** update the api https://github.com/googleapis/google-api-python-client/commit/f8fae98db6d1943411b1a6c0f5a65dea336569f6 ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **content:** update the api https://github.com/googleapis/google-api-python-client/commit/0814e009a4a11800db5b4afd7b6260e504c98047 ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **datacatalog:** update the api https://github.com/googleapis/google-api-python-client/commit/99706059e58bb3d616253a1af2cd162b5a0b0279 ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **dataflow:** update the api https://github.com/googleapis/google-api-python-client/commit/d5f09ef30392532bcfdd82901148bdd3ac6eec01 ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **docs:** update the api https://github.com/googleapis/google-api-python-client/commit/dc66f4cafba86baff6149b2f6e59ae1888006911 ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **file:** update the api https://github.com/googleapis/google-api-python-client/commit/523fc5c900f53489d56400deb650f6586c9681a0 ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **firebasehosting:** update the api https://github.com/googleapis/google-api-python-client/commit/c83ac386b65f82e7ba29851d56b496b09a29cf98 ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **healthcare:** update the api https://github.com/googleapis/google-api-python-client/commit/a407471b14349b8c08018196041568f2a35f8d4f ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **ideahub:** update the api https://github.com/googleapis/google-api-python-client/commit/c6b0d83940f238b1330896240492e8db397dcd15 ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **managedidentities:** update the api https://github.com/googleapis/google-api-python-client/commit/863b333da7848029fd1614fd48b46cfbe12afcd5 ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **memcache:** update the api 
https://github.com/googleapis/google-api-python-client/commit/17dc001e4649f54944066ce153e3c552c850a146 ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **metastore:** update the api https://github.com/googleapis/google-api-python-client/commit/f3a76c9359babc48cc0b76ce7e3be0711ba028ae ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **slides:** update the api https://github.com/googleapis/google-api-python-client/commit/314d61b9ef8c5c30f9756462504dc0df92284cb2 ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **sqladmin:** update the api https://github.com/googleapis/google-api-python-client/commit/62784e0b1b5752b480afe1ddd77dcf412bb35dbb ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **tpu:** update the api https://github.com/googleapis/google-api-python-client/commit/16bf712cca4a393d96e4135de3d02e5005051b6d ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) - **youtube:** update the api https://github.com/googleapis/google-api-python-client/commit/ec21dff96d9538ad6c7f9b318eca88178533aa95 ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) ##### Bug Fixes - **keep:** update the api https://github.com/googleapis/google-api-python-client/commit/08fee732e96d3220e624c8fca7b8a9b0c0bcb146 ([1a4514d](https://www.github.com/googleapis/google-api-python-client/commit/1a4514d2862f81fc97e424cd550c286cda0fc859)) ##### Documentation - add recommendation to use v2.x and static discovery artifacts ([#​1434](https://www.github.com/googleapis/google-api-python-client/issues/1434)) ([ca7328c](https://www.github.com/googleapis/google-api-python-client/commit/ca7328cb5340ea282a3d98782926a0b6881a33ed))
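The documentation item above (recommending v2.x and static discovery artifacts) is directly relevant to `language/snippets/api`, which builds the Language service through the discovery client. A rough sketch of what that looks like, assuming the 2.x `build` signature and Application Default Credentials at execute time:

```python
# Sketch only: construct the Natural Language REST client from the discovery
# document bundled with google-api-python-client 2.x instead of fetching it
# over the network.
from googleapiclient import discovery

service = discovery.build("language", "v1", static_discovery=True)

body = {
    "document": {"type": "PLAIN_TEXT", "content": "Hello, world!"},
    "encodingType": "UTF8",
}
# Requires credentials (e.g. Application Default Credentials) when executed.
response = service.documents().analyzeSentiment(body=body).execute()
print(response["documentSentiment"])
```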
--- ### Configuration 📅 **Schedule**: At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box. --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 0b69cc0948ca..3e6cecbd84d2 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.12.0 +google-api-python-client==2.13.0 google-auth==1.33.0 google-auth-httplib2==0.1.0 From 494496194b183881d4dcbe22a14d720512169ce8 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 22 Jul 2021 13:52:23 +0000 Subject: [PATCH 227/323] feat: add Samples section to CONTRIBUTING.rst (#161) Source-Link: https://github.com/googleapis/synthtool/commit/52e4e46eff2a0b70e3ff5506a02929d089d077d4 Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:6186535cbdbf6b9fe61f00294929221d060634dae4a0795c1cefdbc995b2d605 --- language/snippets/api/noxfile.py | 5 +++-- language/snippets/classify_text/noxfile.py | 5 +++-- language/snippets/cloud-client/v1/noxfile.py | 5 +++-- language/snippets/generated-samples/v1/noxfile.py | 5 +++-- language/snippets/sentiment/noxfile.py | 5 +++-- 5 files changed, 15 insertions(+), 10 deletions(-) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index 5ff9e1db5808..6a8ccdae22c9 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -28,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. The values here should be in sync @@ -159,7 +160,7 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: - session.install("black") + session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index 5ff9e1db5808..6a8ccdae22c9 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -28,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. 
The values here should be in sync @@ -159,7 +160,7 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: - session.install("black") + session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index 5ff9e1db5808..6a8ccdae22c9 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -28,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. The values here should be in sync @@ -159,7 +160,7 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: - session.install("black") + session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index 5ff9e1db5808..6a8ccdae22c9 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -28,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. The values here should be in sync @@ -159,7 +160,7 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: - session.install("black") + session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index 5ff9e1db5808..6a8ccdae22c9 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -28,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. 
The values here should be in sync @@ -159,7 +160,7 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: - session.install("black") + session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) From f1ed1a51cf86f6931c0cf1f77ad875f239b270ca Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 27 Jul 2021 12:58:17 +0200 Subject: [PATCH 228/323] chore(deps): update dependency google-api-python-client to v2.14.1 (#163) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 3e6cecbd84d2..6274782ddcd4 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.13.0 +google-api-python-client==2.14.1 google-auth==1.33.0 google-auth-httplib2==0.1.0 From 61a1e3aaedf535ce87248c85e1ca58e50dccf7ff Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 27 Jul 2021 13:32:53 +0200 Subject: [PATCH 229/323] chore(deps): update dependency google-auth to v1.33.1 (#164) Co-authored-by: Anthonios Partheniou --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 6274782ddcd4..cbc34d893f46 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.14.1 -google-auth==1.33.0 +google-auth==1.33.1 google-auth-httplib2==0.1.0 From cc1eb0274c64cb6f3dec86fc4c8e886677266a91 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 27 Jul 2021 13:33:13 +0200 Subject: [PATCH 230/323] chore(deps): update dependency google-cloud-language to v2.2.1 (#165) --- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 0020383cbb9a..fc435773449f 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.2.0 +google-cloud-language==2.2.1 numpy==1.20.1; python_version > '3.6' numpy==1.19.5; python_version <= '3.6' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 62e406fc91cd..7e8a9a022143 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.2.0 +google-cloud-language==2.2.1 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index 62e406fc91cd..7e8a9a022143 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.2.0 +google-cloud-language==2.2.1 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 62e406fc91cd..7e8a9a022143 100644 --- a/language/snippets/sentiment/requirements.txt 
+++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.2.0 +google-cloud-language==2.2.1 From 724ef071d6994f449db99d2795abdb66ae3e37c9 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 27 Jul 2021 13:42:19 +0200 Subject: [PATCH 231/323] chore(deps): update dependency google-api-python-client to v2.15.0 (#169) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index cbc34d893f46..59994db0048c 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.14.1 +google-api-python-client==2.15.0 google-auth==1.33.1 google-auth-httplib2==0.1.0 From 6c343fe7b1765735e7fa80fef8a0b54ada5e40a6 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 28 Jul 2021 12:59:27 +0200 Subject: [PATCH 232/323] chore(deps): update dependency google-auth to v1.34.0 (#171) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 59994db0048c..bb55768af3c3 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.15.0 -google-auth==1.33.1 +google-auth==1.34.0 google-auth-httplib2==0.1.0 From b908dfb1b0842e9b14b6c984fd7d35282ad5bf49 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 28 Jul 2021 16:52:52 +0200 Subject: [PATCH 233/323] chore(deps): update dependency google-cloud-language to v2.2.2 (#173) --- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index fc435773449f..2323f5be9b63 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.2.1 +google-cloud-language==2.2.2 numpy==1.20.1; python_version > '3.6' numpy==1.19.5; python_version <= '3.6' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 7e8a9a022143..fc0ee401d4c5 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.2.1 +google-cloud-language==2.2.2 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index 7e8a9a022143..fc0ee401d4c5 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.2.1 +google-cloud-language==2.2.2 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 7e8a9a022143..fc0ee401d4c5 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.2.1 +google-cloud-language==2.2.2 From 067a056f163f310ebab74c5c459496e449e230d2 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" 
<78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 11 Aug 2021 16:32:30 +0000 Subject: [PATCH 234/323] chore: fix INSTALL_LIBRARY_FROM_SOURCE in noxfile.py (#175) Source-Link: https://github.com/googleapis/synthtool/commit/6252f2cd074c38f37b44abe5e96d128733eb1b61 Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:50e35228649c47b6ca82aa0be3ff9eb2afce51c82b66c4a03fe4afeb5ff6c0fc --- language/snippets/api/noxfile.py | 2 +- language/snippets/classify_text/noxfile.py | 2 +- language/snippets/cloud-client/v1/noxfile.py | 2 +- language/snippets/generated-samples/v1/noxfile.py | 2 +- language/snippets/sentiment/noxfile.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index 6a8ccdae22c9..125bb619cc49 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -96,7 +96,7 @@ def get_pytest_env_vars() -> Dict[str, str]: TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ("True", "true") # # Style Checks # diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index 6a8ccdae22c9..125bb619cc49 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -96,7 +96,7 @@ def get_pytest_env_vars() -> Dict[str, str]: TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ("True", "true") # # Style Checks # diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index 6a8ccdae22c9..125bb619cc49 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -96,7 +96,7 @@ def get_pytest_env_vars() -> Dict[str, str]: TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ("True", "true") # # Style Checks # diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index 6a8ccdae22c9..125bb619cc49 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -96,7 +96,7 @@ def get_pytest_env_vars() -> Dict[str, str]: TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ("True", "true") # # Style Checks # diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index 6a8ccdae22c9..125bb619cc49 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -96,7 +96,7 @@ def get_pytest_env_vars() -> Dict[str, str]: TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", 
False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ("True", "true") # # Style Checks # From 5aa1f38358b0181c82cff8897656054fa1b87e58 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 13 Aug 2021 11:44:44 -0400 Subject: [PATCH 235/323] chore: drop mention of Python 2.7 from templates (#177) Source-Link: https://github.com/googleapis/synthtool/commit/facee4cc1ea096cd8bcc008bb85929daa7c414c0 Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:9743664022bd63a8084be67f144898314c7ca12f0a03e422ac17c733c129d803 Co-authored-by: Owl Bot --- language/snippets/api/noxfile.py | 6 +++--- language/snippets/classify_text/noxfile.py | 6 +++--- language/snippets/cloud-client/v1/noxfile.py | 6 +++--- language/snippets/generated-samples/v1/noxfile.py | 6 +++--- language/snippets/sentiment/noxfile.py | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index 125bb619cc49..e73436a15626 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -39,7 +39,7 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], + 'ignored_versions': [], # Old samples are opted out of enforcing Python type hints # All new samples should feature them @@ -88,8 +88,8 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index 125bb619cc49..e73436a15626 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -39,7 +39,7 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], + 'ignored_versions': [], # Old samples are opted out of enforcing Python type hints # All new samples should feature them @@ -88,8 +88,8 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index 125bb619cc49..e73436a15626 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -39,7 +39,7 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], + 'ignored_versions': [], # Old samples are opted out of enforcing Python type hints # All new samples should feature them @@ -88,8 +88,8 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index 125bb619cc49..e73436a15626 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -39,7 +39,7 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], + 'ignored_versions': [], # Old samples are opted out of enforcing Python type hints # All new samples should feature them @@ -88,8 +88,8 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index 125bb619cc49..e73436a15626 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -39,7 +39,7 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], + 'ignored_versions': [], # Old samples are opted out of enforcing Python type hints # All new samples should feature them @@ -88,8 +88,8 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] From 05d0d2c52ad88f993200fb0e41bd3773d9d88fd4 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 17 Aug 2021 17:58:57 +0200 Subject: [PATCH 236/323] chore(deps): update dependency google-auth to v1.35.0 (#178) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index bb55768af3c3..04a91b44919e 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.15.0 -google-auth==1.34.0 +google-auth==1.35.0 google-auth-httplib2==0.1.0 From 9b861df2063a23974fe6ddf47b3c47e0a69b4d34 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 25 Aug 2021 15:23:14 +0200 Subject: [PATCH 237/323] chore(deps): update dependency google-auth to v2 (#180) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 04a91b44919e..5328e947a4d7 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.15.0 -google-auth==1.35.0 +google-auth==2.0.1 google-auth-httplib2==0.1.0 From 84f0d26de4b68a83fcf07257d1b4c8181a8d46e7 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 25 Aug 2021 15:47:19 +0200 Subject: [PATCH 238/323] chore(deps): update dependency google-api-python-client to v2.18.0 (#179) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 
5328e947a4d7..a1c3b1941b0a 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.15.0 +google-api-python-client==2.18.0 google-auth==2.0.1 google-auth-httplib2==0.1.0 From 067ab335631c925247ac851075aef7c668c8d8a3 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 31 Aug 2021 13:28:32 +0200 Subject: [PATCH 239/323] chore(deps): update dependency pytest to v6.2.5 (#184) --- language/snippets/api/requirements-test.txt | 2 +- language/snippets/classify_text/requirements-test.txt | 2 +- language/snippets/cloud-client/v1/requirements-test.txt | 2 +- language/snippets/generated-samples/v1/requirements-test.txt | 2 +- language/snippets/sentiment/requirements-test.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements-test.txt b/language/snippets/api/requirements-test.txt index 95ea1e6a02b0..927094516e65 100644 --- a/language/snippets/api/requirements-test.txt +++ b/language/snippets/api/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.4 +pytest==6.2.5 diff --git a/language/snippets/classify_text/requirements-test.txt b/language/snippets/classify_text/requirements-test.txt index 95ea1e6a02b0..927094516e65 100644 --- a/language/snippets/classify_text/requirements-test.txt +++ b/language/snippets/classify_text/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.4 +pytest==6.2.5 diff --git a/language/snippets/cloud-client/v1/requirements-test.txt b/language/snippets/cloud-client/v1/requirements-test.txt index 95ea1e6a02b0..927094516e65 100644 --- a/language/snippets/cloud-client/v1/requirements-test.txt +++ b/language/snippets/cloud-client/v1/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.4 +pytest==6.2.5 diff --git a/language/snippets/generated-samples/v1/requirements-test.txt b/language/snippets/generated-samples/v1/requirements-test.txt index 95ea1e6a02b0..927094516e65 100644 --- a/language/snippets/generated-samples/v1/requirements-test.txt +++ b/language/snippets/generated-samples/v1/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.4 +pytest==6.2.5 diff --git a/language/snippets/sentiment/requirements-test.txt b/language/snippets/sentiment/requirements-test.txt index 95ea1e6a02b0..927094516e65 100644 --- a/language/snippets/sentiment/requirements-test.txt +++ b/language/snippets/sentiment/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.4 +pytest==6.2.5 From ff987d376f28372a2b34cc6e130617c31150eab8 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 31 Aug 2021 13:34:12 +0200 Subject: [PATCH 240/323] chore(deps): update dependency google-api-python-client to v2.19.0 (#185) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [google-api-python-client](https://togithub.com/googleapis/google-api-python-client) | `==2.18.0` -> `==2.19.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.19.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.19.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.19.0/compatibility-slim/2.18.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-api-python-client/2.19.0/confidence-slim/2.18.0)](https://docs.renovatebot.com/merge-confidence/) |

---

### Release Notes

googleapis/google-api-python-client

### [`v2.19.0`](https://togithub.com/googleapis/google-api-python-client/compare/v2.18.0...v2.19.0)

[Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v2.18.0...v2.19.0)
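A pin bump like this is easy to verify from inside the target environment: after `pip install -r language/snippets/api/requirements.txt`, the installed packages should report the pinned versions. A short sketch follows, with the expected values taken from this PR's diff as assumptions (google-auth is still at 2.0.1 at this point; its own bump lands in a later PR).

```python
# Sketch: confirm the environment picked up the renovate pin bump.
# Expected values are assumptions read off this PR's requirements diff.
import google.auth
import googleapiclient

print("google-api-python-client:", googleapiclient.__version__)  # expect 2.19.0
print("google-auth:", google.auth.__version__)  # expect 2.0.1
```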
--- ### Configuration 📅 **Schedule**: At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box. --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-language). --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index a1c3b1941b0a..0230ffad632b 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.18.0 +google-api-python-client==2.19.0 google-auth==2.0.1 google-auth-httplib2==0.1.0 From a1f999cddaa636a2dc2e4b6b9f679a0936ba507c Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 1 Sep 2021 13:21:11 +0200 Subject: [PATCH 241/323] chore(deps): update dependency google-auth to v2.0.2 (#186) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 0230ffad632b..7473ec53132a 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.19.0 -google-auth==2.0.1 +google-auth==2.0.2 google-auth-httplib2==0.1.0 From 5dc0d4f953bee2b3c35bda88c75fcc0ff62fb88b Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 2 Sep 2021 12:39:15 +0200 Subject: [PATCH 242/323] chore(deps): update dependency google-api-python-client to v2.19.1 (#189) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 7473ec53132a..094c8e571286 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.19.0 +google-api-python-client==2.19.1 google-auth==2.0.2 google-auth-httplib2==0.1.0 From d6f5d9a4c0cc2722de71fcb6355fbafea514a7b4 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 17 Sep 2021 15:36:56 +0000 Subject: [PATCH 243/323] chore: blacken samples noxfile template (#193) --- language/snippets/api/noxfile.py | 44 +++++++++++-------- language/snippets/classify_text/noxfile.py | 44 +++++++++++-------- language/snippets/cloud-client/v1/noxfile.py | 44 +++++++++++-------- .../snippets/generated-samples/v1/noxfile.py | 44 +++++++++++-------- language/snippets/sentiment/noxfile.py | 44 +++++++++++-------- 5 files changed, 125 insertions(+), 95 deletions(-) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index e73436a15626..b008613f03ff 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -39,17 +39,15 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. 
- 'ignored_versions': [], - + "ignored_versions": [], # Old samples are opted out of enforcing Python type hints # All new samples should feature them - 'enforce_type_hints': False, - + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # If you need to use a specific version of pip, # change pip_version_override to the string representation @@ -57,13 +55,13 @@ "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -78,12 +76,12 @@ def get_pytest_env_vars() -> Dict[str, str]: ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret @@ -92,11 +90,14 @@ def get_pytest_env_vars() -> Dict[str, str]: ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ("True", "true") +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) # # Style Checks # @@ -141,7 +142,7 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: - if not TEST_CONFIG['enforce_type_hints']: + if not TEST_CONFIG["enforce_type_hints"]: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -150,9 +151,11 @@ def lint(session: nox.sessions.Session) -> None: args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." + ".", ] session.run("flake8", *args) + + # # Black # @@ -165,6 +168,7 @@ def blacken(session: nox.sessions.Session) -> None: session.run("black", *python_files) + # # Sample Tests # @@ -173,7 +177,9 @@ def blacken(session: nox.sessions.Session) -> None: PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: if TEST_CONFIG["pip_version_override"]: pip_version = TEST_CONFIG["pip_version_override"] session.install(f"pip=={pip_version}") @@ -203,7 +209,7 @@ def _session_tests(session: nox.sessions.Session, post_install: Callable = None) # on travis where slow and flaky tests are excluded. 
# See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @@ -213,9 +219,9 @@ def py(session: nox.sessions.Session) -> None: if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index e73436a15626..b008613f03ff 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -39,17 +39,15 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': [], - + "ignored_versions": [], # Old samples are opted out of enforcing Python type hints # All new samples should feature them - 'enforce_type_hints': False, - + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # If you need to use a specific version of pip, # change pip_version_override to the string representation @@ -57,13 +55,13 @@ "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -78,12 +76,12 @@ def get_pytest_env_vars() -> Dict[str, str]: ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret @@ -92,11 +90,14 @@ def get_pytest_env_vars() -> Dict[str, str]: ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ("True", "true") +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) # # Style Checks # @@ -141,7 +142,7 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: - if not TEST_CONFIG['enforce_type_hints']: + if not TEST_CONFIG["enforce_type_hints"]: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -150,9 +151,11 @@ def lint(session: nox.sessions.Session) -> None: args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." 
+ ".", ] session.run("flake8", *args) + + # # Black # @@ -165,6 +168,7 @@ def blacken(session: nox.sessions.Session) -> None: session.run("black", *python_files) + # # Sample Tests # @@ -173,7 +177,9 @@ def blacken(session: nox.sessions.Session) -> None: PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: if TEST_CONFIG["pip_version_override"]: pip_version = TEST_CONFIG["pip_version_override"] session.install(f"pip=={pip_version}") @@ -203,7 +209,7 @@ def _session_tests(session: nox.sessions.Session, post_install: Callable = None) # on travis where slow and flaky tests are excluded. # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @@ -213,9 +219,9 @@ def py(session: nox.sessions.Session) -> None: if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index e73436a15626..b008613f03ff 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -39,17 +39,15 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': [], - + "ignored_versions": [], # Old samples are opted out of enforcing Python type hints # All new samples should feature them - 'enforce_type_hints': False, - + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # If you need to use a specific version of pip, # change pip_version_override to the string representation @@ -57,13 +55,13 @@ "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -78,12 +76,12 @@ def get_pytest_env_vars() -> Dict[str, str]: ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret @@ -92,11 +90,14 @@ def get_pytest_env_vars() -> Dict[str, str]: ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. 
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ("True", "true") +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) # # Style Checks # @@ -141,7 +142,7 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: - if not TEST_CONFIG['enforce_type_hints']: + if not TEST_CONFIG["enforce_type_hints"]: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -150,9 +151,11 @@ def lint(session: nox.sessions.Session) -> None: args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." + ".", ] session.run("flake8", *args) + + # # Black # @@ -165,6 +168,7 @@ def blacken(session: nox.sessions.Session) -> None: session.run("black", *python_files) + # # Sample Tests # @@ -173,7 +177,9 @@ def blacken(session: nox.sessions.Session) -> None: PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: if TEST_CONFIG["pip_version_override"]: pip_version = TEST_CONFIG["pip_version_override"] session.install(f"pip=={pip_version}") @@ -203,7 +209,7 @@ def _session_tests(session: nox.sessions.Session, post_install: Callable = None) # on travis where slow and flaky tests are excluded. # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @@ -213,9 +219,9 @@ def py(session: nox.sessions.Session) -> None: if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index e73436a15626..b008613f03ff 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -39,17 +39,15 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': [], - + "ignored_versions": [], # Old samples are opted out of enforcing Python type hints # All new samples should feature them - 'enforce_type_hints': False, - + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # If you need to use a specific version of pip, # change pip_version_override to the string representation @@ -57,13 +55,13 @@ "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. 
- sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -78,12 +76,12 @@ def get_pytest_env_vars() -> Dict[str, str]: ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret @@ -92,11 +90,14 @@ def get_pytest_env_vars() -> Dict[str, str]: ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ("True", "true") +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) # # Style Checks # @@ -141,7 +142,7 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: - if not TEST_CONFIG['enforce_type_hints']: + if not TEST_CONFIG["enforce_type_hints"]: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -150,9 +151,11 @@ def lint(session: nox.sessions.Session) -> None: args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." + ".", ] session.run("flake8", *args) + + # # Black # @@ -165,6 +168,7 @@ def blacken(session: nox.sessions.Session) -> None: session.run("black", *python_files) + # # Sample Tests # @@ -173,7 +177,9 @@ def blacken(session: nox.sessions.Session) -> None: PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: if TEST_CONFIG["pip_version_override"]: pip_version = TEST_CONFIG["pip_version_override"] session.install(f"pip=={pip_version}") @@ -203,7 +209,7 @@ def _session_tests(session: nox.sessions.Session, post_install: Callable = None) # on travis where slow and flaky tests are excluded. # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @@ -213,9 +219,9 @@ def py(session: nox.sessions.Session) -> None: if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index e73436a15626..b008613f03ff 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -39,17 +39,15 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. 
- 'ignored_versions': [], - + "ignored_versions": [], # Old samples are opted out of enforcing Python type hints # All new samples should feature them - 'enforce_type_hints': False, - + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # If you need to use a specific version of pip, # change pip_version_override to the string representation @@ -57,13 +55,13 @@ "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -78,12 +76,12 @@ def get_pytest_env_vars() -> Dict[str, str]: ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret @@ -92,11 +90,14 @@ def get_pytest_env_vars() -> Dict[str, str]: ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ("True", "true") +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) # # Style Checks # @@ -141,7 +142,7 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: - if not TEST_CONFIG['enforce_type_hints']: + if not TEST_CONFIG["enforce_type_hints"]: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -150,9 +151,11 @@ def lint(session: nox.sessions.Session) -> None: args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." + ".", ] session.run("flake8", *args) + + # # Black # @@ -165,6 +168,7 @@ def blacken(session: nox.sessions.Session) -> None: session.run("black", *python_files) + # # Sample Tests # @@ -173,7 +177,9 @@ def blacken(session: nox.sessions.Session) -> None: PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: if TEST_CONFIG["pip_version_override"]: pip_version = TEST_CONFIG["pip_version_override"] session.install(f"pip=={pip_version}") @@ -203,7 +209,7 @@ def _session_tests(session: nox.sessions.Session, post_install: Callable = None) # on travis where slow and flaky tests are excluded. 
# See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @@ -213,9 +219,9 @@ def py(session: nox.sessions.Session) -> None: if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # From 32bacdbd921bedfa9952bef31c1811386eca8de8 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 21 Sep 2021 21:29:36 +0200 Subject: [PATCH 244/323] chore(deps): update all dependencies (#192) --- language/snippets/api/requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 094c8e571286..d02277db638b 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.19.1 -google-auth==2.0.2 +google-api-python-client==2.22.0 +google-auth==2.1.0 google-auth-httplib2==0.1.0 From 7824814b1a68947f14a037ba986aeed2bc0677fb Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 27 Sep 2021 19:45:30 +0200 Subject: [PATCH 245/323] chore(deps): update dependency google-auth to v2.2.0 (#197) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index d02277db638b..7e4bd82d38e3 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.22.0 -google-auth==2.1.0 +google-auth==2.2.0 google-auth-httplib2==0.1.0 From 8e06646cf28ad8df8e7e9302b6e4414f08715485 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 28 Sep 2021 17:46:14 +0200 Subject: [PATCH 246/323] chore(deps): update dependency google-api-python-client to v2.23.0 (#198) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 7e4bd82d38e3..84c3927dc774 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.22.0 +google-api-python-client==2.23.0 google-auth==2.2.0 google-auth-httplib2==0.1.0 From a906117c159f69fd908a0b7f29acdbc3ea58798f Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 29 Sep 2021 16:53:57 +0200 Subject: [PATCH 247/323] chore(deps): update dependency google-auth to v2.2.1 (#199) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 84c3927dc774..2bf9babbc851 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.23.0 -google-auth==2.2.0 +google-auth==2.2.1 google-auth-httplib2==0.1.0 From 7a775c29d79ff6bc513172614a83b86194398edf Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 30 Sep 2021 15:44:41 +0000 Subject: [PATCH 248/323] chore: fail samples nox session if python version is missing (#200) --- language/snippets/api/noxfile.py | 4 ++++ language/snippets/classify_text/noxfile.py | 4 ++++ 
language/snippets/cloud-client/v1/noxfile.py | 4 ++++ language/snippets/generated-samples/v1/noxfile.py | 4 ++++ language/snippets/sentiment/noxfile.py | 4 ++++ 5 files changed, 20 insertions(+) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index b008613f03ff..1fd8956fbf01 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -98,6 +98,10 @@ def get_pytest_env_vars() -> Dict[str, str]: "True", "true", ) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index b008613f03ff..1fd8956fbf01 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -98,6 +98,10 @@ def get_pytest_env_vars() -> Dict[str, str]: "True", "true", ) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index b008613f03ff..1fd8956fbf01 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -98,6 +98,10 @@ def get_pytest_env_vars() -> Dict[str, str]: "True", "true", ) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index b008613f03ff..1fd8956fbf01 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -98,6 +98,10 @@ def get_pytest_env_vars() -> Dict[str, str]: "True", "true", ) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index b008613f03ff..1fd8956fbf01 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -98,6 +98,10 @@ def get_pytest_env_vars() -> Dict[str, str]: "True", "true", ) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # From 7213781f7eaf7242f927a7990e2bba8fa0912b62 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 5 Oct 2021 16:53:12 +0200 Subject: [PATCH 249/323] chore(deps): update dependency google-api-python-client to v2.24.0 (#201) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 2bf9babbc851..2d20608262a9 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.23.0 +google-api-python-client==2.24.0 google-auth==2.2.1 google-auth-httplib2==0.1.0 From 1e135ed873090ec11c29972612bc36d1c1ab9895 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sat, 9 Oct 2021 10:54:50 -0400 Subject: [PATCH 250/323] chore(python): Add kokoro configs for python 3.10 samples testing (#207) * chore(python): Add kokoro configs for python 3.10 samples testing Source-Link: https://github.com/googleapis/synthtool/commit/c6e69c4726a233ad8d496961ec265d29e54010b7 Post-Processor: 
gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:58f73ba196b5414782605236dd0712a73541b44ff2ff4d3a36ec41092dd6fa5b * add constraints files for py3.10 and py3.11 Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- language/snippets/api/noxfile.py | 2 +- language/snippets/classify_text/noxfile.py | 2 +- language/snippets/cloud-client/v1/noxfile.py | 2 +- language/snippets/generated-samples/v1/noxfile.py | 2 +- language/snippets/sentiment/noxfile.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index 1fd8956fbf01..93a9122cc457 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -87,7 +87,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index 1fd8956fbf01..93a9122cc457 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -87,7 +87,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index 1fd8956fbf01..93a9122cc457 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -87,7 +87,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index 1fd8956fbf01..93a9122cc457 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -87,7 +87,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index 1fd8956fbf01..93a9122cc457 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -87,7 +87,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] From e3f91163d5da9dbdebb9bd1e171f1c5a24998ffb Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 2 Nov 2021 20:31:07 +0100 Subject: [PATCH 251/323] chore(deps): update dependency google-auth to v2.3.0 (#206) Co-authored-by: Anthonios Partheniou --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 2d20608262a9..7e4f4a6b7399 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.24.0 -google-auth==2.2.1 +google-auth==2.3.0 google-auth-httplib2==0.1.0 From 5626a0f52e03b9f07fdb7c8651c3b5b63f16547b Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 3 Nov 2021 12:31:21 +0100 Subject: [PATCH 252/323] chore(deps): update all dependencies (#216) --- language/snippets/api/requirements.txt | 4 ++-- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 7e4f4a6b7399..39362509535f 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.24.0 -google-auth==2.3.0 +google-api-python-client==2.29.0 +google-auth==2.3.3 google-auth-httplib2==0.1.0 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 2323f5be9b63..0a0e005610bf 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.2.2 +google-cloud-language==2.3.1 numpy==1.20.1; python_version > '3.6' numpy==1.19.5; python_version <= '3.6' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index fc0ee401d4c5..d2b8f2f023fa 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.2.2 +google-cloud-language==2.3.1 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index fc0ee401d4c5..d2b8f2f023fa 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.2.2 +google-cloud-language==2.3.1 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index fc0ee401d4c5..d2b8f2f023fa 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.2.2 +google-cloud-language==2.3.1 From abf8e46237c41dcd63efd5e10bd38c46fdbd8865 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 9 Nov 2021 17:00:26 +0100 Subject: [PATCH 253/323] chore(deps): update dependency google-api-python-client to v2.30.0 (#218) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 39362509535f..decb2aef238c 
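The numpy pins touched in the patch above rely on PEP 508 environment markers (the text after the semicolon), so one requirements.txt can carry different pins per interpreter; pip evaluates the marker against the running Python at install time. A self-contained illustration using the packaging library (bundled with modern pip/setuptools):

from packaging.requirements import Requirement

# Parse a pinned requirement with an environment marker, as pip does.
req = Requirement("numpy==1.19.5; python_version <= '3.6'")
print(req.name, req.specifier)   # numpy ==1.19.5
print(req.marker.evaluate())     # True only when running on Python <= 3.6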
100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.29.0 +google-api-python-client==2.30.0 google-auth==2.3.3 google-auth-httplib2==0.1.0 From a952f235551148946ad5b3698cb307ba29801c4e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 10 Nov 2021 20:37:03 -0500 Subject: [PATCH 254/323] chore(python): run blacken session for all directories with a noxfile (#220) Source-Link: https://github.com/googleapis/synthtool/commit/bc0de6ee2489da6fb8eafd021a8c58b5cc30c947 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:39ad8c0570e4f5d2d3124a509de4fe975e799e2b97e0f58aed88f8880d5a8b60 Co-authored-by: Owl Bot --- language/snippets/classify_text/classify_text_tutorial.py | 2 +- language/snippets/cloud-client/v1/quickstart.py | 8 ++++++-- language/snippets/cloud-client/v1/set_endpoint.py | 4 +++- .../generated-samples/v1/language_sentiment_text.py | 2 +- language/snippets/sentiment/sentiment_analysis.py | 6 ++++-- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/language/snippets/classify_text/classify_text_tutorial.py b/language/snippets/classify_text/classify_text_tutorial.py index 9c05b83f589c..675f8499efc0 100644 --- a/language/snippets/classify_text/classify_text_tutorial.py +++ b/language/snippets/classify_text/classify_text_tutorial.py @@ -42,7 +42,7 @@ def classify(text, verbose=True): document = language_v1.Document( content=text, type_=language_v1.Document.Type.PLAIN_TEXT ) - response = language_client.classify_text(request={'document': document}) + response = language_client.classify_text(request={"document": document}) categories = response.categories result = {} diff --git a/language/snippets/cloud-client/v1/quickstart.py b/language/snippets/cloud-client/v1/quickstart.py index 4c4b06b52a14..b9b0e96c1781 100644 --- a/language/snippets/cloud-client/v1/quickstart.py +++ b/language/snippets/cloud-client/v1/quickstart.py @@ -30,10 +30,14 @@ def run_quickstart(): # The text to analyze text = u"Hello, world!" 
- document = language_v1.Document(content=text, type_=language_v1.Document.Type.PLAIN_TEXT) + document = language_v1.Document( + content=text, type_=language_v1.Document.Type.PLAIN_TEXT + ) # Detects the sentiment of the text - sentiment = client.analyze_sentiment(request={'document': document}).document_sentiment + sentiment = client.analyze_sentiment( + request={"document": document} + ).document_sentiment print("Text: {}".format(text)) print("Sentiment: {}, {}".format(sentiment.score, sentiment.magnitude)) diff --git a/language/snippets/cloud-client/v1/set_endpoint.py b/language/snippets/cloud-client/v1/set_endpoint.py index e9ad97d3e4b1..c49537a58b81 100644 --- a/language/snippets/cloud-client/v1/set_endpoint.py +++ b/language/snippets/cloud-client/v1/set_endpoint.py @@ -31,7 +31,9 @@ def set_endpoint(): ) # Detects the sentiment of the text - sentiment = client.analyze_sentiment(request={'document': document}).document_sentiment + sentiment = client.analyze_sentiment( + request={"document": document} + ).document_sentiment print("Sentiment: {}, {}".format(sentiment.score, sentiment.magnitude)) diff --git a/language/snippets/generated-samples/v1/language_sentiment_text.py b/language/snippets/generated-samples/v1/language_sentiment_text.py index 9f975023114f..4170ddbc4cd0 100644 --- a/language/snippets/generated-samples/v1/language_sentiment_text.py +++ b/language/snippets/generated-samples/v1/language_sentiment_text.py @@ -39,7 +39,7 @@ def sample_analyze_sentiment(content): type_ = language_v1.Document.Type.PLAIN_TEXT document = {"type_": type_, "content": content} - response = client.analyze_sentiment(request={'document': document}) + response = client.analyze_sentiment(request={"document": document}) sentiment = response.document_sentiment print("Score: {}".format(sentiment.score)) print("Magnitude: {}".format(sentiment.magnitude)) diff --git a/language/snippets/sentiment/sentiment_analysis.py b/language/snippets/sentiment/sentiment_analysis.py index 2333bf8238ab..e82c3a68ae86 100644 --- a/language/snippets/sentiment/sentiment_analysis.py +++ b/language/snippets/sentiment/sentiment_analysis.py @@ -51,8 +51,10 @@ def analyze(movie_review_filename): # Instantiates a plain text document. 
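The re-wrapping in this patch is purely mechanical: black splits long constructors and calls across lines, while the v2 calling convention of passing proto fields inside one request mapping is untouched. Condensed from the quickstart being reformatted here into a minimal sketch (running it performs a real API call and needs credentials):

from google.cloud import language_v1

client = language_v1.LanguageServiceClient()
document = language_v1.Document(
    content="Hello, world!",
    type_=language_v1.Document.Type.PLAIN_TEXT,
)
# v2 surface: arguments travel inside a single request dict.
sentiment = client.analyze_sentiment(
    request={"document": document}
).document_sentiment
print(sentiment.score, sentiment.magnitude)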
content = review_file.read() - document = language_v1.Document(content=content, type_=language_v1.Document.Type.PLAIN_TEXT) - annotations = client.analyze_sentiment(request={'document': document}) + document = language_v1.Document( + content=content, type_=language_v1.Document.Type.PLAIN_TEXT + ) + annotations = client.analyze_sentiment(request={"document": document}) # Print the results print_result(annotations) From f17eb0d0d7724a048f6cd8cf1660cfdcf22d1958 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 18 Nov 2021 18:56:29 +0100 Subject: [PATCH 255/323] chore(deps): update dependency google-api-python-client to v2.31.0 (#224) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index decb2aef238c..f324442a76e3 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.30.0 +google-api-python-client==2.31.0 google-auth==2.3.3 google-auth-httplib2==0.1.0 From 26754860e16dde3b672b850685a05423b60ff106 Mon Sep 17 00:00:00 2001 From: Dina Graves Portman Date: Fri, 3 Dec 2021 13:44:55 -0500 Subject: [PATCH 256/323] chore: updating numpy (#226) * Testing if numpy works for 3.10 yet Does numpy work for 3.10? * Update requirements.txt * Update requirements.txt * Update requirements.txt * Update requirements.txt --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 0a0e005610bf..b4f8763fa050 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ google-cloud-language==2.3.1 -numpy==1.20.1; python_version > '3.6' +numpy==1.21.4; python_version > '3.6' numpy==1.19.5; python_version <= '3.6' From a62a587f2c110208fba20bc137703eb18466f4eb Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 3 Dec 2021 19:51:46 +0100 Subject: [PATCH 257/323] chore(deps): update dependency google-api-python-client to v2.32.0 (#225) Co-authored-by: Dina Graves Portman --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index f324442a76e3..83ea01b003d2 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.31.0 +google-api-python-client==2.32.0 google-auth==2.3.3 google-auth-httplib2==0.1.0 From 524badd2e2afc09e493068996886bba6d7594114 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 7 Dec 2021 19:34:55 +0100 Subject: [PATCH 258/323] chore(deps): update dependency google-api-python-client to v2.33.0 (#229) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 83ea01b003d2..56e7e8e2dc2d 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.32.0 +google-api-python-client==2.33.0 google-auth==2.3.3 google-auth-httplib2==0.1.0 From bc921dbdbb36981e75ef0532f816d642978959f0 Mon Sep 17 00:00:00 2001 From: Andrew Ferlitsch Date: Tue, 7 Dec 2021 14:08:58 -0800 Subject: [PATCH 259/323] fix: 
Document -> types.Document (#227) --- language/v1/language_entity_sentiment_text.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/v1/language_entity_sentiment_text.py b/language/v1/language_entity_sentiment_text.py index b28434df9c7a..4e1341d52158 100644 --- a/language/v1/language_entity_sentiment_text.py +++ b/language/v1/language_entity_sentiment_text.py @@ -40,7 +40,7 @@ def sample_analyze_entity_sentiment(text_content): # text_content = 'Grapes are good. Bananas are bad.' # Available types: PLAIN_TEXT, HTML - type_ = language_v1.Document.Type.PLAIN_TEXT + type_ = language_v1.types.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: From 639b58cdd80a71f181b4073f0159a5a48f4e856a Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 8 Jan 2022 01:39:03 +0100 Subject: [PATCH 260/323] chore(deps): update dependency google-api-python-client to v2.34.0 (#236) Co-authored-by: Anthonios Partheniou --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 56e7e8e2dc2d..2802f988f5c9 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.33.0 +google-api-python-client==2.34.0 google-auth==2.3.3 google-auth-httplib2==0.1.0 From 393b6052c2bc1f46184fbd6f3f3b4bbd1156d6a5 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 11 Jan 2022 07:46:10 -0500 Subject: [PATCH 261/323] chore(samples): Add check for tests in directory (#240) Source-Link: https://github.com/googleapis/synthtool/commit/52aef91f8d25223d9dbdb4aebd94ba8eea2101f3 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:36a95b8f494e4674dc9eee9af98961293b51b86b3649942aac800ae6c1f796d4 Co-authored-by: Owl Bot --- language/snippets/api/noxfile.py | 70 +++++++++++-------- language/snippets/classify_text/noxfile.py | 70 +++++++++++-------- language/snippets/cloud-client/v1/noxfile.py | 70 +++++++++++-------- .../snippets/generated-samples/v1/noxfile.py | 70 +++++++++++-------- language/snippets/sentiment/noxfile.py | 70 +++++++++++-------- 5 files changed, 195 insertions(+), 155 deletions(-) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - 
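On the one-line fix just above: in google-cloud-language 2.x the generated proto classes are re-exported at the package top level, so language_v1.Document and language_v1.types.Document are expected to name the same class, and the patch simply standardizes on the types path. That re-export is an assumption worth verifying against your installed version:

from google.cloud import language_v1

# Expected to hold on google-cloud-language 2.x; verify rather than assume.
assert language_v1.Document is language_v1.types.Document
print(language_v1.types.Document.Type.PLAIN_TEXT)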
session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
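Worth pulling out of this repeated hunk: the install step layers a constraints file over requirements, where -r names what to install and -c only caps versions without adding packages. Outside nox, the same effect looks roughly like this (paths illustrative, constraints.txt assumed present):

import subprocess
import sys

# Mirror the noxfile's guarded install: requirements list the packages,
# constraints only restrict which versions the resolver may pick.
subprocess.check_call(
    [sys.executable, "-m", "pip", "install",
     "-r", "requirements.txt", "-c", "constraints.txt"]
)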
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) From f1de6174adb8cd9c6624c17814c3e76b6330d8cd Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 17 Jan 2022 17:31:44 +0100 Subject: [PATCH 262/323] chore(deps): update all dependencies (#238) * chore(deps): update all dependencies * revert numpy update for python_version <= '3.6' * pin numpy 1.21.4 for python 3.7 samples * fix typo Co-authored-by: Anthonios Partheniou --- language/snippets/api/requirements.txt | 2 +- language/snippets/classify_text/requirements.txt | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 2802f988f5c9..7c7432b2c2d0 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.34.0 +google-api-python-client==2.35.0 google-auth==2.3.3 google-auth-httplib2==0.1.0 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index b4f8763fa050..8ebc429c078f 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,4 @@ google-cloud-language==2.3.1 -numpy==1.21.4; python_version > '3.6' +numpy==1.22.1; python_version > '3.7' +numpy==1.21.4; python_version == '3.7' numpy==1.19.5; python_version <= '3.6' From c4000fd85025d6cf0cdade16e8f77143579f41e1 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 18 Jan 2022 20:27:37 -0500 Subject: [PATCH 263/323] chore(python): Noxfile recognizes that tests can live in a folder (#246) Source-Link: https://github.com/googleapis/synthtool/commit/4760d8dce1351d93658cb11d02a1b7ceb23ae5d7 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:f0e4b51deef56bed74d3e2359c583fc104a8d6367da3984fc5c66938db738828 Co-authored-by: Owl Bot --- language/snippets/api/noxfile.py | 1 + language/snippets/classify_text/noxfile.py | 1 + language/snippets/cloud-client/v1/noxfile.py | 1 + language/snippets/generated-samples/v1/noxfile.py | 1 + 
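The directory check these noxfiles gained treats a sample as testable when any of three patterns matches; isolated from the session plumbing, the rule is just:

import glob

# Same discovery rule as the generated noxfiles: *_test.py, test_*.py,
# or a tests/ directory marks the sample directory as testable.
def has_tests() -> bool:
    test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
    test_list.extend(glob.glob("tests"))
    return len(test_list) > 0

if not has_tests():
    print("No tests found, skipping directory.")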
language/snippets/sentiment/noxfile.py | 1 + 5 files changed, 5 insertions(+) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: From 885cbf362c7e8bb21ed914622c00b969be5823ba Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 19 Jan 2022 02:50:55 +0100 Subject: [PATCH 264/323] chore(deps): update all dependencies (#247) * chore(deps): update all dependencies * revert pin changes for python <= 3.7 Co-authored-by: Anthonios Partheniou --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 7c7432b2c2d0..c86677b3d070 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.35.0 +google-api-python-client==2.36.0 google-auth==2.3.3 google-auth-httplib2==0.1.0 From 12d144f450120f9ef13d3f5286565d3d0ab4d024 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 3 Feb 2022 15:58:59 +0000 Subject: [PATCH 265/323] chore: use gapic-generator-python 0.62.1 
(#260) - [ ] Regenerate this pull request now. fix: resolve DuplicateCredentialArgs error when using credentials_file committer: parthea PiperOrigin-RevId: 425964861 Source-Link: https://github.com/googleapis/googleapis/commit/84b1a5a4f6fb2d04905be58e586b8a7a4310a8cf Source-Link: https://github.com/googleapis/googleapis-gen/commit/4fb761bbd8506ac156f49bac5f18306aa8eb3aa8 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNGZiNzYxYmJkODUwNmFjMTU2ZjQ5YmFjNWYxODMwNmFhOGViM2FhOCJ9 Closes #259 --- language/snippets/classify_text/requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 8ebc429c078f..b562bb8631f2 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,4 +1,3 @@ google-cloud-language==2.3.1 numpy==1.22.1; python_version > '3.7' numpy==1.21.4; python_version == '3.7' -numpy==1.19.5; python_version <= '3.6' From 2a41dc6055ee1458c76f4ea89dfd071633e47b58 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 26 Feb 2022 20:04:49 +0100 Subject: [PATCH 266/323] chore(deps): update all dependencies (#253) * chore(deps): update all dependencies * revert numpy pin changes for python <= 3.7 * add classifier for python 3.9 and 3.10 Co-authored-by: Anthonios Partheniou --- language/snippets/api/requirements.txt | 2 +- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index c86677b3d070..8418e7883736 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.36.0 -google-auth==2.3.3 +google-auth==2.4.1 google-auth-httplib2==0.1.0 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index b562bb8631f2..572357c3dd29 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.3.1 +google-cloud-language==2.3.2 numpy==1.22.1; python_version > '3.7' numpy==1.21.4; python_version == '3.7' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index d2b8f2f023fa..8d0fae86643e 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.3.1 +google-cloud-language==2.3.2 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index d2b8f2f023fa..8d0fae86643e 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.3.1 +google-cloud-language==2.3.2 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index d2b8f2f023fa..8d0fae86643e 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.3.1 +google-cloud-language==2.3.2 From 036ee3e5b65004c7008d0310450bc05159ae12ec Mon Sep 
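The generator fix noted above ("resolve DuplicateCredentialArgs error when using credentials_file") concerns supplying credentials through two channels at once, a credentials object plus a credentials file, which must not both reach the transport. The single-channel pattern on the client side looks roughly like this (path is a placeholder; the cause described here summarizes the commit message, not the generated code):

from google.cloud import language_v1

# Load credentials from a service-account key file via the client
# classmethod; do not also pass a separate credentials= object.
client = language_v1.LanguageServiceClient.from_service_account_file(
    "/path/to/service-account.json"  # placeholder
)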
17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 28 Feb 2022 23:52:22 +0100 Subject: [PATCH 267/323] chore(deps): update all dependencies (#265) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * revert change for python 3.7 Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- language/snippets/api/requirements-test.txt | 2 +- language/snippets/api/requirements.txt | 4 ++-- language/snippets/classify_text/requirements-test.txt | 2 +- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements-test.txt | 2 +- language/snippets/generated-samples/v1/requirements-test.txt | 2 +- language/snippets/sentiment/requirements-test.txt | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/language/snippets/api/requirements-test.txt b/language/snippets/api/requirements-test.txt index 927094516e65..c2845bffbe89 100644 --- a/language/snippets/api/requirements-test.txt +++ b/language/snippets/api/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.5 +pytest==7.0.1 diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 8418e7883736..16bc690d9d55 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.36.0 -google-auth==2.4.1 +google-api-python-client==2.38.0 +google-auth==2.6.0 google-auth-httplib2==0.1.0 diff --git a/language/snippets/classify_text/requirements-test.txt b/language/snippets/classify_text/requirements-test.txt index 927094516e65..c2845bffbe89 100644 --- a/language/snippets/classify_text/requirements-test.txt +++ b/language/snippets/classify_text/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.5 +pytest==7.0.1 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 572357c3dd29..33cadea45a45 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ google-cloud-language==2.3.2 -numpy==1.22.1; python_version > '3.7' +numpy==1.22.2; python_version > '3.7' numpy==1.21.4; python_version == '3.7' diff --git a/language/snippets/cloud-client/v1/requirements-test.txt b/language/snippets/cloud-client/v1/requirements-test.txt index 927094516e65..c2845bffbe89 100644 --- a/language/snippets/cloud-client/v1/requirements-test.txt +++ b/language/snippets/cloud-client/v1/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.5 +pytest==7.0.1 diff --git a/language/snippets/generated-samples/v1/requirements-test.txt b/language/snippets/generated-samples/v1/requirements-test.txt index 927094516e65..c2845bffbe89 100644 --- a/language/snippets/generated-samples/v1/requirements-test.txt +++ b/language/snippets/generated-samples/v1/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.5 +pytest==7.0.1 diff --git a/language/snippets/sentiment/requirements-test.txt b/language/snippets/sentiment/requirements-test.txt index 927094516e65..c2845bffbe89 100644 --- a/language/snippets/sentiment/requirements-test.txt +++ b/language/snippets/sentiment/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.5 +pytest==7.0.1 From 895b3fc30d477e1991d42b89c2402bd3fd60fc4b Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 1 Mar 2022 16:25:09 +0100 Subject: [PATCH 268/323] chore(deps): update 
all dependencies (#269) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * revert change Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- language/snippets/api/requirements.txt | 2 +- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 16bc690d9d55..616dfb22f739 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.38.0 +google-api-python-client==2.39.0 google-auth==2.6.0 google-auth-httplib2==0.1.0 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 33cadea45a45..51bd8a78e0d5 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.3.2 +google-cloud-language==2.4.0 numpy==1.22.2; python_version > '3.7' numpy==1.21.4; python_version == '3.7' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 8d0fae86643e..94560e6fc38a 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.3.2 +google-cloud-language==2.4.0 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index 8d0fae86643e..94560e6fc38a 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.3.2 +google-cloud-language==2.4.0 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 8d0fae86643e..94560e6fc38a 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.3.2 +google-cloud-language==2.4.0 From bccb3ff962ed58be1ab246881d803083bf574dae Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 4 Mar 2022 12:47:52 -0500 Subject: [PATCH 269/323] chore: Adding support for pytest-xdist and pytest-parallel (#274) Source-Link: https://github.com/googleapis/synthtool/commit/82f5cb283efffe96e1b6cd634738e0e7de2cd90a Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:5d8da01438ece4021d135433f2cf3227aa39ef0eaccc941d62aa35e6902832ae Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- language/snippets/api/noxfile.py | 78 +++++++++++-------- language/snippets/classify_text/noxfile.py | 78 +++++++++++-------- language/snippets/cloud-client/v1/noxfile.py | 78 +++++++++++-------- .../snippets/generated-samples/v1/noxfile.py | 78 +++++++++++-------- language/snippets/sentiment/noxfile.py | 78 +++++++++++-------- 5 files changed, 220 insertions(+), 170 deletions(-) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py 
index 20cdfc620138..85f5836dba3a 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -188,42 +188,52 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) + elif "pytest-xdist" in packages: + concurrent_args.extend(["-n", "auto"]) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index 20cdfc620138..85f5836dba3a 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -188,42 +188,52 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) + elif "pytest-xdist" in packages: + concurrent_args.extend(["-n", "auto"]) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index 20cdfc620138..85f5836dba3a 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -188,42 +188,52 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) + elif "pytest-xdist" in packages: + concurrent_args.extend(["-n", "auto"]) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index 20cdfc620138..85f5836dba3a 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -188,42 +188,52 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) + elif "pytest-xdist" in packages: + concurrent_args.extend(["-n", "auto"]) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index 20cdfc620138..85f5836dba3a 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -188,42 +188,52 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) + elif "pytest-xdist" in packages: + concurrent_args.extend(["-n", "auto"]) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
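This recurring hunk (one copy per sample directory) reads the concatenated requirements text and picks pytest concurrency flags to match whichever plugin is present. The decision logic on its own:

# Standalone sketch of the flag selection the noxfiles gained.
def concurrency_args(packages: str) -> list:
    if "pytest-parallel" in packages:
        return ["--workers", "auto", "--tests-per-worker", "auto"]
    if "pytest-xdist" in packages:
        return ["-n", "auto"]
    return []

print(concurrency_args("pytest==7.0.1\npytest-xdist==2.5.0\n"))  # ['-n', 'auto']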
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) From 53ef0e232b1ad68516ba3b4a7bd13ad629889d63 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 8 Mar 2022 02:48:49 +0100 Subject: [PATCH 270/323] chore(deps): update all dependencies (#278) * chore(deps): update all dependencies * revert Co-authored-by: Anthonios Partheniou --- language/snippets/classify_text/requirements.txt | 4 ++-- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 51bd8a78e0d5..1c28b1e27983 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.4.0 -numpy==1.22.2; python_version > '3.7' +google-cloud-language==2.4.1 +numpy==1.22.3; python_version > '3.7' numpy==1.21.4; python_version == '3.7' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 94560e6fc38a..0d2a8d65f4d3 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.4.0 +google-cloud-language==2.4.1 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index 94560e6fc38a..0d2a8d65f4d3 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.4.0 +google-cloud-language==2.4.1 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 94560e6fc38a..0d2a8d65f4d3 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.4.0 +google-cloud-language==2.4.1 From fb2ba09711ba5faaa33e91f4dd344d3f98d9989f Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 28 Mar 2022 19:59:25 -0400 Subject: [PATCH 271/323] chore(python): use black==22.3.0 (#283) Source-Link: https://github.com/googleapis/synthtool/commit/6fab84af09f2cf89a031fd8671d1def6b2931b11 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:7cffbc10910c3ab1b852c05114a08d374c195a81cdec1d4a67a1d129331d0bfe Co-authored-by: Owl Bot --- language/snippets/api/analyze_test.py | 38 +++++++++---------- language/snippets/api/noxfile.py | 4 +- .../classify_text/classify_text_tutorial.py | 8 ++-- language/snippets/classify_text/noxfile.py | 4 +- language/snippets/cloud-client/v1/noxfile.py | 4 +- .../snippets/cloud-client/v1/quickstart.py | 2 +- .../snippets/generated-samples/v1/noxfile.py | 4 +- language/snippets/sentiment/noxfile.py | 4 +- 8 files changed, 34 insertions(+), 34 deletions(-) diff --git a/language/snippets/api/analyze_test.py b/language/snippets/api/analyze_test.py index c797e2e3364c..da5f0ab0c4d9 100644 --- a/language/snippets/api/analyze_test.py +++ b/language/snippets/api/analyze_test.py @@ -48,7 +48,7 @@ def test_analyze_sentiment(capsys): def test_analyze_syntax(capsys): result = analyze.analyze_syntax( 
textwrap.dedent( - u"""\ + """\ Keep away from people who try to belittle your ambitions. Small people always do that, but the really great make you feel that you, too, can become great. @@ -71,7 +71,7 @@ def test_analyze_syntax_utf8(): bits. The offsets we get should be the index of the first byte of the character. """ - test_string = u"a \u00e3 \u0201 \U0001f636 b" + test_string = "a \u00e3 \u0201 \U0001f636 b" byte_array = test_string.encode("utf8") result = analyze.analyze_syntax(test_string, encoding="UTF8") tokens = result["tokens"] @@ -82,19 +82,19 @@ def test_analyze_syntax_utf8(): byte_array[offset : offset + 1].decode("utf8") == tokens[0]["text"]["content"] ) - assert tokens[1]["text"]["content"] == u"\u00e3" + assert tokens[1]["text"]["content"] == "\u00e3" offset = tokens[1]["text"].get("beginOffset", 0) assert ( byte_array[offset : offset + 2].decode("utf8") == tokens[1]["text"]["content"] ) - assert tokens[2]["text"]["content"] == u"\u0201" + assert tokens[2]["text"]["content"] == "\u0201" offset = tokens[2]["text"].get("beginOffset", 0) assert ( byte_array[offset : offset + 2].decode("utf8") == tokens[2]["text"]["content"] ) - assert tokens[3]["text"]["content"] == u"\U0001f636" + assert tokens[3]["text"]["content"] == "\U0001f636" offset = tokens[3]["text"].get("beginOffset", 0) assert ( byte_array[offset : offset + 4].decode("utf8") == tokens[3]["text"]["content"] @@ -102,7 +102,7 @@ def test_analyze_syntax_utf8(): # This demonstrates that the offset takes into account the variable-length # characters before the target token. - assert tokens[4]["text"]["content"] == u"b" + assert tokens[4]["text"]["content"] == "b" offset = tokens[4]["text"].get("beginOffset", 0) # 'b' is only one byte long assert ( @@ -117,7 +117,7 @@ def test_analyze_syntax_utf16(): bits. The returned offsets will be the index of the first 2-byte character of the token. """ - test_string = u"a \u00e3 \u0201 \U0001f636 b" + test_string = "a \u00e3 \u0201 \U0001f636 b" byte_array = test_string.encode("utf16") # Remove the byte order marker, which the offsets don't account for byte_array = byte_array[2:] @@ -133,7 +133,7 @@ def test_analyze_syntax_utf16(): byte_array[offset : offset + 2].decode("utf16") == tokens[0]["text"]["content"] ) - assert tokens[1]["text"]["content"] == u"\u00e3" + assert tokens[1]["text"]["content"] == "\u00e3" offset = 2 * tokens[1]["text"].get("beginOffset", 0) # A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so # slice out 2 bytes starting from the offset. Then interpret the bytes as @@ -142,7 +142,7 @@ def test_analyze_syntax_utf16(): byte_array[offset : offset + 2].decode("utf16") == tokens[1]["text"]["content"] ) - assert tokens[2]["text"]["content"] == u"\u0201" + assert tokens[2]["text"]["content"] == "\u0201" offset = 2 * tokens[2]["text"].get("beginOffset", 0) # A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so # slice out 2 bytes starting from the offset. Then interpret the bytes as @@ -151,7 +151,7 @@ def test_analyze_syntax_utf16(): byte_array[offset : offset + 2].decode("utf16") == tokens[2]["text"]["content"] ) - assert tokens[3]["text"]["content"] == u"\U0001f636" + assert tokens[3]["text"]["content"] == "\U0001f636" offset = 2 * tokens[3]["text"].get("beginOffset", 0) # A UTF16 character with a high codepoint is 32 bits (4 bytes) long, so # slice out 4 bytes starting from the offset. 
Then interpret those bytes as @@ -162,7 +162,7 @@ def test_analyze_syntax_utf16(): # This demonstrates that the offset takes into account the variable-length # characters before the target token. - assert tokens[4]["text"]["content"] == u"b" + assert tokens[4]["text"]["content"] == "b" offset = 2 * tokens[4]["text"].get("beginOffset", 0) # Even though 'b' is only one byte long, utf16 still encodes it using 16 # bits @@ -192,7 +192,7 @@ def test_annotate_text_utf32(): unicode object with the raw offset returned by the api (ie without multiplying it by 4, as it is below). """ - test_string = u"a \u00e3 \u0201 \U0001f636 b" + test_string = "a \u00e3 \u0201 \U0001f636 b" byte_array = test_string.encode("utf32") # Remove the byte order marker, which the offsets don't account for byte_array = byte_array[4:] @@ -208,7 +208,7 @@ def test_annotate_text_utf32(): byte_array[offset : offset + 4].decode("utf32") == tokens[0]["text"]["content"] ) - assert tokens[1]["text"]["content"] == u"\u00e3" + assert tokens[1]["text"]["content"] == "\u00e3" offset = 4 * tokens[1]["text"].get("beginOffset", 0) # A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so # slice out 4 bytes starting from the offset. Then interpret the bytes as @@ -217,7 +217,7 @@ def test_annotate_text_utf32(): byte_array[offset : offset + 4].decode("utf32") == tokens[1]["text"]["content"] ) - assert tokens[2]["text"]["content"] == u"\u0201" + assert tokens[2]["text"]["content"] == "\u0201" offset = 4 * tokens[2]["text"].get("beginOffset", 0) # A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so # slice out 4 bytes starting from the offset. Then interpret the bytes as @@ -226,7 +226,7 @@ def test_annotate_text_utf32(): byte_array[offset : offset + 4].decode("utf32") == tokens[2]["text"]["content"] ) - assert tokens[3]["text"]["content"] == u"\U0001f636" + assert tokens[3]["text"]["content"] == "\U0001f636" offset = 4 * tokens[3]["text"].get("beginOffset", 0) # A UTF32 character with a high codepoint is 32 bits (4 bytes) long, so # slice out 4 bytes starting from the offset. Then interpret those bytes as @@ -237,7 +237,7 @@ def test_annotate_text_utf32(): # This demonstrates that the offset takes into account the variable-length # characters before the target token. - assert tokens[4]["text"]["content"] == u"b" + assert tokens[4]["text"]["content"] == "b" offset = 4 * tokens[4]["text"].get("beginOffset", 0) # Even though 'b' is only one byte long, utf32 still encodes it using 32 # bits @@ -252,7 +252,7 @@ def test_annotate_text_utf32_directly_index_into_unicode(): See the explanation for test_annotate_text_utf32. Essentially, indexing into a utf32 array is equivalent to indexing into a python unicode object. 
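All four encoding variants above verify the same arithmetic: `beginOffset` counts code units of the requested encoding, so the byte index is the offset times the unit width (1 byte for UTF-8, 2 for UTF-16, 4 for UTF-32, with UTF-32 offsets doubling as Python string indices). A condensed sketch of that arithmetic, mirroring the tests' convention of stripping the byte order mark and decoding on a little-endian machine (the offset value is illustrative):

test_string = "a \u00e3 \u0201 \U0001f636 b"
offset = 2  # illustrative beginOffset pointing at "\u00e3"

# UTF-32: every code point is one 4-byte unit, so the code-unit offset
# equals the Python string index and the byte index is offset * 4.
utf32 = test_string.encode("utf32")[4:]  # drop the byte order mark
assert utf32[offset * 4 : offset * 4 + 4].decode("utf32") == test_string[offset]

# UTF-16: code points below U+10000 are one 2-byte unit; astral ones
# such as "\U0001f636" take two units, which is why later offsets shift.
utf16 = test_string.encode("utf16")[2:]  # drop the byte order mark
assert utf16[offset * 2 : offset * 2 + 2].decode("utf16") == test_string[offset]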
""" - test_string = u"a \u00e3 \u0201 \U0001f636 b" + test_string = "a \u00e3 \u0201 \U0001f636 b" result = analyze.analyze_syntax(test_string, encoding="UTF32") tokens = result["tokens"] @@ -260,11 +260,11 @@ def test_annotate_text_utf32_directly_index_into_unicode(): offset = tokens[0]["text"].get("beginOffset", 0) assert test_string[offset] == tokens[0]["text"]["content"] - assert tokens[1]["text"]["content"] == u"\u00e3" + assert tokens[1]["text"]["content"] == "\u00e3" offset = tokens[1]["text"].get("beginOffset", 0) assert test_string[offset] == tokens[1]["text"]["content"] - assert tokens[2]["text"]["content"] == u"\u0201" + assert tokens[2]["text"]["content"] == "\u0201" offset = tokens[2]["text"].get("beginOffset", 0) assert test_string[offset] == tokens[2]["text"]["content"] diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index 85f5836dba3a..25f87a215d4c 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -253,7 +253,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """ Returns the root folder of the project. """ + """Returns the root folder of the project.""" # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) for i in range(10): diff --git a/language/snippets/classify_text/classify_text_tutorial.py b/language/snippets/classify_text/classify_text_tutorial.py index 675f8499efc0..de35451dd0f7 100644 --- a/language/snippets/classify_text/classify_text_tutorial.py +++ b/language/snippets/classify_text/classify_text_tutorial.py @@ -35,7 +35,7 @@ # [START language_classify_text_tutorial_classify] def classify(text, verbose=True): - """Classify the input text into categories. """ + """Classify the input text into categories.""" language_client = language_v1.LanguageServiceClient() @@ -56,9 +56,9 @@ def classify(text, verbose=True): if verbose: print(text) for category in categories: - print(u"=" * 20) - print(u"{:<16}: {}".format("category", category.name)) - print(u"{:<16}: {}".format("confidence", category.confidence)) + print("=" * 20) + print("{:<16}: {}".format("category", category.name)) + print("{:<16}: {}".format("confidence", category.confidence)) return result diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index 85f5836dba3a..25f87a215d4c 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -253,7 +253,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """ Returns the root folder of the project. """ + """Returns the root folder of the project.""" # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
p = Path(os.getcwd()) for i in range(10): diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index 85f5836dba3a..25f87a215d4c 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -253,7 +253,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """ Returns the root folder of the project. """ + """Returns the root folder of the project.""" # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) for i in range(10): diff --git a/language/snippets/cloud-client/v1/quickstart.py b/language/snippets/cloud-client/v1/quickstart.py index b9b0e96c1781..59c1cd435cc2 100644 --- a/language/snippets/cloud-client/v1/quickstart.py +++ b/language/snippets/cloud-client/v1/quickstart.py @@ -29,7 +29,7 @@ def run_quickstart(): # [END language_python_migration_client] # The text to analyze - text = u"Hello, world!" + text = "Hello, world!" document = language_v1.Document( content=text, type_=language_v1.Document.Type.PLAIN_TEXT ) diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index 85f5836dba3a..25f87a215d4c 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -253,7 +253,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """ Returns the root folder of the project. """ + """Returns the root folder of the project.""" # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) for i in range(10): diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index 85f5836dba3a..25f87a215d4c 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -253,7 +253,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """ Returns the root folder of the project. """ + """Returns the root folder of the project.""" # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
p = Path(os.getcwd()) for i in range(10): From 90ca214069ac0c1e8724c543bfe564abe5674654 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 30 Mar 2022 16:01:28 +0200 Subject: [PATCH 272/323] chore(deps): update all dependencies (#279) * chore(deps): update all dependencies * revert change for pinned version Co-authored-by: Anthonios Partheniou --- language/snippets/api/requirements-test.txt | 2 +- language/snippets/api/requirements.txt | 4 ++-- language/snippets/classify_text/requirements-test.txt | 2 +- language/snippets/cloud-client/v1/requirements-test.txt | 2 +- language/snippets/generated-samples/v1/requirements-test.txt | 2 +- language/snippets/sentiment/requirements-test.txt | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/language/snippets/api/requirements-test.txt b/language/snippets/api/requirements-test.txt index c2845bffbe89..4f6bf643fc5e 100644 --- a/language/snippets/api/requirements-test.txt +++ b/language/snippets/api/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.1 +pytest==7.1.1 diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 616dfb22f739..e39a6af76813 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.39.0 -google-auth==2.6.0 +google-api-python-client==2.42.0 +google-auth==2.6.2 google-auth-httplib2==0.1.0 diff --git a/language/snippets/classify_text/requirements-test.txt b/language/snippets/classify_text/requirements-test.txt index c2845bffbe89..4f6bf643fc5e 100644 --- a/language/snippets/classify_text/requirements-test.txt +++ b/language/snippets/classify_text/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.1 +pytest==7.1.1 diff --git a/language/snippets/cloud-client/v1/requirements-test.txt b/language/snippets/cloud-client/v1/requirements-test.txt index c2845bffbe89..4f6bf643fc5e 100644 --- a/language/snippets/cloud-client/v1/requirements-test.txt +++ b/language/snippets/cloud-client/v1/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.1 +pytest==7.1.1 diff --git a/language/snippets/generated-samples/v1/requirements-test.txt b/language/snippets/generated-samples/v1/requirements-test.txt index c2845bffbe89..4f6bf643fc5e 100644 --- a/language/snippets/generated-samples/v1/requirements-test.txt +++ b/language/snippets/generated-samples/v1/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.1 +pytest==7.1.1 diff --git a/language/snippets/sentiment/requirements-test.txt b/language/snippets/sentiment/requirements-test.txt index c2845bffbe89..4f6bf643fc5e 100644 --- a/language/snippets/sentiment/requirements-test.txt +++ b/language/snippets/sentiment/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.1 +pytest==7.1.1 From 01117be846a0fe40c0f42cba07482812ceacbc1d Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 30 Mar 2022 16:43:15 +0200 Subject: [PATCH 273/323] chore: use === in requirements.txt for environment specific pins (#284) * chore(deps): update dependency numpy to v1.22.3 * chore: use === in requirements.txt for environment specific pins * revert version bump for environment specific pin Co-authored-by: Anthonios Partheniou --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 1c28b1e27983..dc049d13b590 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 
+1,3 @@ google-cloud-language==2.4.1 numpy==1.22.3; python_version > '3.7' -numpy==1.21.4; python_version == '3.7' +numpy===1.21.4; python_version == '3.7' From 4931fd7c5a0a7afe501c12b9cc1bbaea30a7d693 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 6 Apr 2022 13:02:46 +0200 Subject: [PATCH 274/323] chore(deps): update dependency google-api-python-client to v2.43.0 (#291) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index e39a6af76813..4ba22b3ffe7d 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.42.0 +google-api-python-client==2.43.0 google-auth==2.6.2 google-auth-httplib2==0.1.0 From 9fdd0e778cb2e04dce0aa2c506542c46a999ef2b Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 8 Apr 2022 01:13:27 +0200 Subject: [PATCH 275/323] chore(deps): update dependency google-auth to v2.6.3 (#296) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 4ba22b3ffe7d..6c83831d6bd2 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.43.0 -google-auth==2.6.2 +google-auth==2.6.3 google-auth-httplib2==0.1.0 From cf7329b0237a0059f70548a1b766fb2725bcc991 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 13 Apr 2022 00:48:11 +0200 Subject: [PATCH 276/323] chore(deps): update dependency google-api-python-client to v2.44.0 (#299) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 6c83831d6bd2..3b0e360297fd 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.43.0 +google-api-python-client==2.44.0 google-auth==2.6.3 google-auth-httplib2==0.1.0 From 78fd6095aae75a0e8af06a69acb9ccec3cc86c50 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 13 Apr 2022 01:28:46 +0200 Subject: [PATCH 277/323] chore(deps): update dependency google-auth to v2.6.4 (#300) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 3b0e360297fd..ab42a15ec6b9 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.44.0 -google-auth==2.6.3 +google-auth==2.6.4 google-auth-httplib2==0.1.0 From 065dc6d55320d25648b0049a5816f21dcf4541c7 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 15 Apr 2022 02:45:14 +0200 Subject: [PATCH 278/323] chore(deps): update dependency google-auth to v2.6.5 (#303) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index ab42a15ec6b9..13fd78ceb4a6 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.44.0 -google-auth==2.6.4 +google-auth==2.6.5 google-auth-httplib2==0.1.0 From 364b08579c4ddcb0880f9a41aa83ffc3e87146fa Mon Sep 17 
00:00:00 2001 From: WhiteSource Renovate Date: Tue, 19 Apr 2022 16:59:43 +0200 Subject: [PATCH 279/323] chore(deps): update dependency google-api-python-client to v2.45.0 (#304) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 13fd78ceb4a6..665a8c6728a1 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.44.0 +google-api-python-client==2.45.0 google-auth==2.6.5 google-auth-httplib2==0.1.0 From 1db93cbc5339a778832764860692b1f2c8ce06a3 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 20 Apr 2022 21:28:28 -0400 Subject: [PATCH 280/323] chore(python): add nox session to sort python imports (#305) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(python): add nox session to sort python imports Source-Link: https://github.com/googleapis/synthtool/commit/1b71c10e20de7ed3f97f692f99a0e3399b67049f Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:00c9d764fd1cd56265f12a5ef4b99a0c9e87cf261018099141e2ca5158890416 * revert change to region tag * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- language/snippets/api/noxfile.py | 23 ++++++++++++++++++- .../classify_text_tutorial_test.py | 1 - language/snippets/classify_text/noxfile.py | 23 ++++++++++++++++++- language/snippets/cloud-client/v1/noxfile.py | 23 ++++++++++++++++++- .../snippets/cloud-client/v1/quickstart.py | 1 - .../v1/language_sentiment_text.py | 1 + .../snippets/generated-samples/v1/noxfile.py | 23 ++++++++++++++++++- language/snippets/sentiment/noxfile.py | 23 ++++++++++++++++++- 8 files changed, 111 insertions(+), 7 deletions(-) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index 25f87a215d4c..3b3ffa5d2b0f 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -22,7 +22,6 @@ import nox - # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING # DO NOT EDIT THIS FILE EVER! @@ -30,6 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -168,12 +168,33 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. 
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/language/snippets/classify_text/classify_text_tutorial_test.py b/language/snippets/classify_text/classify_text_tutorial_test.py index 5e8211299bf7..5859a771ea23 100644 --- a/language/snippets/classify_text/classify_text_tutorial_test.py +++ b/language/snippets/classify_text/classify_text_tutorial_test.py @@ -17,7 +17,6 @@ import classify_text_tutorial - OUTPUT = "index.json" RESOURCES = os.path.join(os.path.dirname(__file__), "resources") QUERY_TEXT = """Google Home enables users to speak voice commands to interact diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index 25f87a215d4c..3b3ffa5d2b0f 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -22,7 +22,6 @@ import nox - # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING # DO NOT EDIT THIS FILE EVER! @@ -30,6 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -168,12 +168,33 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index 25f87a215d4c..3b3ffa5d2b0f 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -22,7 +22,6 @@ import nox - # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING # DO NOT EDIT THIS FILE EVER! @@ -30,6 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -168,12 +168,33 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. 
+ """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/language/snippets/cloud-client/v1/quickstart.py b/language/snippets/cloud-client/v1/quickstart.py index 59c1cd435cc2..bbc914d16149 100644 --- a/language/snippets/cloud-client/v1/quickstart.py +++ b/language/snippets/cloud-client/v1/quickstart.py @@ -22,7 +22,6 @@ def run_quickstart(): from google.cloud import language_v1 # [END language_python_migration_imports] - # Instantiates a client # [START language_python_migration_client] client = language_v1.LanguageServiceClient() diff --git a/language/snippets/generated-samples/v1/language_sentiment_text.py b/language/snippets/generated-samples/v1/language_sentiment_text.py index 4170ddbc4cd0..13447d17c3b3 100644 --- a/language/snippets/generated-samples/v1/language_sentiment_text.py +++ b/language/snippets/generated-samples/v1/language_sentiment_text.py @@ -21,6 +21,7 @@ import sys +# isort: split # [START language_sentiment_text] from google.cloud import language_v1 diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index 25f87a215d4c..3b3ffa5d2b0f 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -22,7 +22,6 @@ import nox - # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING # DO NOT EDIT THIS FILE EVER! @@ -30,6 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -168,12 +168,33 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index 25f87a215d4c..3b3ffa5d2b0f 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -22,7 +22,6 @@ import nox - # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING # DO NOT EDIT THIS FILE EVER! @@ -30,6 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. 
@@ -168,12 +168,33 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # From 6e766425d6cf4a8b667c4e4caf1acb4b42b083d6 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 22 Apr 2022 10:27:20 +0200 Subject: [PATCH 281/323] chore(deps): update dependency google-auth to v2.6.6 (#308) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 665a8c6728a1..ceb309e4c596 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.45.0 -google-auth==2.6.5 +google-auth==2.6.6 google-auth-httplib2==0.1.0 From 3debf0006a068cb716ce1bcb401b3a438c997fb5 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 25 Apr 2022 17:08:42 +0200 Subject: [PATCH 282/323] chore(deps): update dependency pytest to v7.1.2 (#310) --- language/snippets/api/requirements-test.txt | 2 +- language/snippets/classify_text/requirements-test.txt | 2 +- language/snippets/cloud-client/v1/requirements-test.txt | 2 +- language/snippets/generated-samples/v1/requirements-test.txt | 2 +- language/snippets/sentiment/requirements-test.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements-test.txt b/language/snippets/api/requirements-test.txt index 4f6bf643fc5e..d00689e0623a 100644 --- a/language/snippets/api/requirements-test.txt +++ b/language/snippets/api/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.1 +pytest==7.1.2 diff --git a/language/snippets/classify_text/requirements-test.txt b/language/snippets/classify_text/requirements-test.txt index 4f6bf643fc5e..d00689e0623a 100644 --- a/language/snippets/classify_text/requirements-test.txt +++ b/language/snippets/classify_text/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.1 +pytest==7.1.2 diff --git a/language/snippets/cloud-client/v1/requirements-test.txt b/language/snippets/cloud-client/v1/requirements-test.txt index 4f6bf643fc5e..d00689e0623a 100644 --- a/language/snippets/cloud-client/v1/requirements-test.txt +++ b/language/snippets/cloud-client/v1/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.1 +pytest==7.1.2 diff --git a/language/snippets/generated-samples/v1/requirements-test.txt b/language/snippets/generated-samples/v1/requirements-test.txt index 4f6bf643fc5e..d00689e0623a 100644 --- a/language/snippets/generated-samples/v1/requirements-test.txt +++ b/language/snippets/generated-samples/v1/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.1 +pytest==7.1.2 diff --git a/language/snippets/sentiment/requirements-test.txt 
b/language/snippets/sentiment/requirements-test.txt index 4f6bf643fc5e..d00689e0623a 100644 --- a/language/snippets/sentiment/requirements-test.txt +++ b/language/snippets/sentiment/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.1 +pytest==7.1.2 From 8cc85a3c4cfdcc41256550b124f6a9345485421c Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 26 Apr 2022 20:03:33 +0200 Subject: [PATCH 283/323] chore(deps): update dependency google-api-python-client to v2.46.0 (#311) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index ceb309e4c596..658c99bfac2b 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.45.0 +google-api-python-client==2.46.0 google-auth==2.6.6 google-auth-httplib2==0.1.0 From 61f74ae8050384defb1241f1f52d156218f55153 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 3 May 2022 20:20:30 +0200 Subject: [PATCH 284/323] chore(deps): update dependency google-api-python-client to v2.47.0 (#313) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 658c99bfac2b..654cf0f4772f 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.46.0 +google-api-python-client==2.47.0 google-auth==2.6.6 google-auth-httplib2==0.1.0 From 0a8110652edc96218173853c7093f7ff934a4d56 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 17 May 2022 20:49:23 +0200 Subject: [PATCH 285/323] chore(deps): update dependency google-api-python-client to v2.48.0 (#318) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 654cf0f4772f..04f99eac1ec0 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.47.0 +google-api-python-client==2.48.0 google-auth==2.6.6 google-auth-httplib2==0.1.0 From 875de18e223fe5158e74ecf5f0e42394c3f8e5bd Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 19 May 2022 17:51:06 +0200 Subject: [PATCH 286/323] chore(deps): update dependency google-cloud-language to v2.4.2 (#319) --- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index dc049d13b590..18edbce3d71c 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.4.1 +google-cloud-language==2.4.2 numpy==1.22.3; python_version > '3.7' numpy===1.21.4; python_version == '3.7' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 0d2a8d65f4d3..33f606ed26ad 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.4.1 
+google-cloud-language==2.4.2 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index 0d2a8d65f4d3..33f606ed26ad 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.4.1 +google-cloud-language==2.4.2 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 0d2a8d65f4d3..33f606ed26ad 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.4.1 +google-cloud-language==2.4.2 From e1b3fddd0515dc92ae5c1922149ba995b3f9fffa Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sun, 22 May 2022 20:08:05 +0200 Subject: [PATCH 287/323] chore(deps): update dependency numpy to v1.22.4 (#320) --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 18edbce3d71c..d0380844532f 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ google-cloud-language==2.4.2 -numpy==1.22.3; python_version > '3.7' +numpy==1.22.4; python_version > '3.7' numpy===1.21.4; python_version == '3.7' From 1749119862f14e05c0b9aaa9f757d826ef459f44 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 27 May 2022 19:51:02 +0200 Subject: [PATCH 288/323] chore(deps): update dependency google-api-python-client to v2.49.0 (#321) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 04f99eac1ec0..e9b14cef2e42 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.48.0 +google-api-python-client==2.49.0 google-auth==2.6.6 google-auth-httplib2==0.1.0 From e7a3cf7c7c52af672467214fbaf12ef0870e4810 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 7 Jul 2022 10:14:10 -0400 Subject: [PATCH 289/323] fix: require python 3.7+ (#336) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(python): drop python 3.6 Source-Link: https://github.com/googleapis/synthtool/commit/4f89b13af10d086458f9b379e56a614f9d6dab7b Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:e7bb19d47c13839fe8c147e50e02e8b6cf5da8edd1af8b82208cd6f66cc2829c * add api_description to .repo-metadata.json * require python 3.7+ in setup.py * remove python 3.6 sample configs * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- language/snippets/api/noxfile.py | 2 +- language/snippets/classify_text/noxfile.py | 2 +- language/snippets/cloud-client/v1/noxfile.py | 2 +- language/snippets/generated-samples/v1/noxfile.py | 2 +- language/snippets/sentiment/noxfile.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index 3b3ffa5d2b0f..e9eb1cbfa5db 100644 --- a/language/snippets/api/noxfile.py +++ 
b/language/snippets/api/noxfile.py @@ -88,7 +88,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index 3b3ffa5d2b0f..e9eb1cbfa5db 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -88,7 +88,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index 3b3ffa5d2b0f..e9eb1cbfa5db 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -88,7 +88,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index 3b3ffa5d2b0f..e9eb1cbfa5db 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -88,7 +88,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index 3b3ffa5d2b0f..e9eb1cbfa5db 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -88,7 +88,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. 
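Dropping "3.6" from `ALL_VERSIONS` is sufficient because that list parametrizes the session decorator, so nox simply stops generating the 3.6 sessions. A minimal sketch of that fan-out (the session body is illustrative, not taken from these noxfiles):

import nox

ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"]  # "3.6" removed by this patch


# nox generates one session per interpreter in the list,
# e.g. tests-3.7, tests-3.8, tests-3.9, tests-3.10.
@nox.session(python=ALL_VERSIONS)
def tests(session):
    session.install("pytest")
    session.run("pytest")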
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] From 210ce3c3733a93e67d7d8f1c2888f73ba183ba69 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 7 Jul 2022 18:31:47 +0200 Subject: [PATCH 290/323] chore(deps): update all dependencies (#329) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * revert Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- language/snippets/api/requirements.txt | 4 ++-- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index e9b14cef2e42..08c8a7851b06 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.49.0 -google-auth==2.6.6 +google-api-python-client==2.50.0 +google-auth==2.7.0 google-auth-httplib2==0.1.0 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index d0380844532f..f4013916bc6d 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.4.2 +google-cloud-language==2.4.3 numpy==1.22.4; python_version > '3.7' numpy===1.21.4; python_version == '3.7' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 33f606ed26ad..964e9dc4ae56 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.4.2 +google-cloud-language==2.4.3 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index 33f606ed26ad..964e9dc4ae56 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.4.2 +google-cloud-language==2.4.3 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 33f606ed26ad..964e9dc4ae56 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.4.2 +google-cloud-language==2.4.3 From fe8d151d968c25c49d7b960f738f904cb4303e0c Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 2 Aug 2022 17:05:42 +0200 Subject: [PATCH 291/323] chore(deps): update all dependencies (#344) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * revert Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- language/snippets/api/requirements.txt | 4 ++-- language/snippets/classify_text/requirements.txt | 4 ++-- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- 
language/snippets/sentiment/requirements.txt | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 08c8a7851b06..9630b7adcded 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.50.0 -google-auth==2.7.0 +google-api-python-client==2.55.0 +google-auth==2.9.1 google-auth-httplib2==0.1.0 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index f4013916bc6d..32c8e5a79642 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.4.3 -numpy==1.22.4; python_version > '3.7' +google-cloud-language==2.5.1 +numpy==1.23.1; python_version > '3.7' numpy===1.21.4; python_version == '3.7' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index 964e9dc4ae56..ed1b691cc919 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.4.3 +google-cloud-language==2.5.1 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index 964e9dc4ae56..ed1b691cc919 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.4.3 +google-cloud-language==2.5.1 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index 964e9dc4ae56..ed1b691cc919 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.4.3 +google-cloud-language==2.5.1 From 40278f4d6c1794c7109f3bedf9f247a7cdd572f2 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 6 Aug 2022 03:21:59 +0200 Subject: [PATCH 292/323] chore(deps): update all dependencies (#346) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * revert Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 9630b7adcded..9f2cf41aafa7 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.55.0 -google-auth==2.9.1 +google-auth==2.10.0 google-auth-httplib2==0.1.0 From e2acd1d7b449242bd378c0c8f3b15e8897efce78 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 9 Aug 2022 17:13:10 +0200 Subject: [PATCH 293/323] chore(deps): update all dependencies (#347) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * revert * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md 
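Most of these Renovate bumps are plain `==` pins, but the classify_text requirements touched earlier pair PEP 508 environment markers with a PEP 440 `===` pin to select a numpy build per interpreter. How pip evaluates such a marker can be sketched with the `packaging` library (an assumption for illustration; the samples themselves do not depend on it):

from packaging.markers import Marker

# The marker guarding the numpy===1.21.4 line in classify_text/requirements.txt.
marker = Marker("python_version == '3.7'")

# True only under a 3.7 interpreter, so pip installs that pinned line
# there and skips it on newer versions.
print(marker.evaluate())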
Co-authored-by: Anthonios Partheniou Co-authored-by: Owl Bot --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 9f2cf41aafa7..773c3e1e6c8f 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.55.0 +google-api-python-client==2.56.0 google-auth==2.10.0 google-auth-httplib2==0.1.0 From da2d2c5d8c83a4d6f10a89a46bf90fa673950cb1 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 16 Aug 2022 17:00:02 +0200 Subject: [PATCH 294/323] chore(deps): update dependency numpy to v1.23.2 (#352) Co-authored-by: Anthonios Partheniou --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 32c8e5a79642..d8f8d38410a3 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ google-cloud-language==2.5.1 -numpy==1.23.1; python_version > '3.7' +numpy==1.23.2; python_version > '3.7' numpy===1.21.4; python_version == '3.7' From 83fdbf6e7ef2b2b28d680f26f9b5b8d05b3a9c48 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 16 Aug 2022 17:53:40 +0200 Subject: [PATCH 295/323] chore(deps): update all dependencies (#353) --- language/snippets/api/requirements.txt | 2 +- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 773c3e1e6c8f..e018afc8b862 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.56.0 +google-api-python-client==2.57.0 google-auth==2.10.0 google-auth-httplib2==0.1.0 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index d8f8d38410a3..fcb7426b1675 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.5.1 +google-cloud-language==2.5.2 numpy==1.23.2; python_version > '3.7' numpy===1.21.4; python_version == '3.7' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index ed1b691cc919..db52d214f587 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.5.1 +google-cloud-language==2.5.2 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index ed1b691cc919..db52d214f587 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.5.1 +google-cloud-language==2.5.2 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index ed1b691cc919..db52d214f587 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt 
@@ -1 +1 @@ -google-cloud-language==2.5.1 +google-cloud-language==2.5.2 From a89da92f0ca11b3cd715e65aab9289ca787d394a Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 23 Aug 2022 16:29:54 +0200 Subject: [PATCH 296/323] chore(deps): update dependency google-auth to v2.11.0 (#354) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index e018afc8b862..c69f77e1874e 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.57.0 -google-auth==2.10.0 +google-auth==2.11.0 google-auth-httplib2==0.1.0 From 4eda97f58616cb9b5e97c576ed7f409803b435f3 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 23 Aug 2022 17:45:44 +0200 Subject: [PATCH 297/323] chore(deps): update dependency google-api-python-client to v2.58.0 (#355) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index c69f77e1874e..fc34cff22d31 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.57.0 +google-api-python-client==2.58.0 google-auth==2.11.0 google-auth-httplib2==0.1.0 From 0efea8c415a0c6d760e98455eaf11745b081763a Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 6 Sep 2022 17:41:18 +0200 Subject: [PATCH 298/323] chore(deps): update dependency pytest to v7.1.3 (#366) --- language/snippets/api/requirements-test.txt | 2 +- language/snippets/classify_text/requirements-test.txt | 2 +- language/snippets/cloud-client/v1/requirements-test.txt | 2 +- language/snippets/generated-samples/v1/requirements-test.txt | 2 +- language/snippets/sentiment/requirements-test.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements-test.txt b/language/snippets/api/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/language/snippets/api/requirements-test.txt +++ b/language/snippets/api/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.2 +pytest==7.1.3 diff --git a/language/snippets/classify_text/requirements-test.txt b/language/snippets/classify_text/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/language/snippets/classify_text/requirements-test.txt +++ b/language/snippets/classify_text/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.2 +pytest==7.1.3 diff --git a/language/snippets/cloud-client/v1/requirements-test.txt b/language/snippets/cloud-client/v1/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/language/snippets/cloud-client/v1/requirements-test.txt +++ b/language/snippets/cloud-client/v1/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.2 +pytest==7.1.3 diff --git a/language/snippets/generated-samples/v1/requirements-test.txt b/language/snippets/generated-samples/v1/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/language/snippets/generated-samples/v1/requirements-test.txt +++ b/language/snippets/generated-samples/v1/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.2 +pytest==7.1.3 diff --git a/language/snippets/sentiment/requirements-test.txt b/language/snippets/sentiment/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/language/snippets/sentiment/requirements-test.txt +++ b/language/snippets/sentiment/requirements-test.txt @@ -1 
+1 @@ -pytest==7.1.2 +pytest==7.1.3 From 0de31f66b051c7d249242a4bc7e536ade6f2ebf4 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 6 Sep 2022 21:36:36 +0200 Subject: [PATCH 299/323] chore(deps): update dependency google-api-python-client to v2.60.0 (#367) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index fc34cff22d31..0304f99b12b6 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.58.0 +google-api-python-client==2.60.0 google-auth==2.11.0 google-auth-httplib2==0.1.0 From 7cd4d6e4d016d11aec7b2dade6e3a5ab6c695023 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 12 Sep 2022 16:43:52 +0200 Subject: [PATCH 300/323] chore(deps): update dependency numpy to v1.23.3 (#370) --- language/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index fcb7426b1675..954099fff0e8 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ google-cloud-language==2.5.2 -numpy==1.23.2; python_version > '3.7' +numpy==1.23.3; python_version > '3.7' numpy===1.21.4; python_version == '3.7' From c0c088ad24000cf3a45b95676ccaf64538568b4b Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 13 Sep 2022 16:18:28 +0000 Subject: [PATCH 301/323] chore: detect samples tests in nested directories (#372) Source-Link: https://github.com/googleapis/synthtool/commit/50db768f450a50d7c1fd62513c113c9bb96fd434 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:e09366bdf0fd9c8976592988390b24d53583dd9f002d476934da43725adbb978 --- language/snippets/api/noxfile.py | 6 ++++-- language/snippets/classify_text/noxfile.py | 6 ++++-- language/snippets/cloud-client/v1/noxfile.py | 6 ++++-- language/snippets/generated-samples/v1/noxfile.py | 6 ++++-- language/snippets/sentiment/noxfile.py | 6 ++++-- 5 files changed, 20 insertions(+), 10 deletions(-) diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py index e9eb1cbfa5db..c1715136d645 100644 --- a/language/snippets/api/noxfile.py +++ b/language/snippets/api/noxfile.py @@ -207,8 +207,10 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( + "**/test_*.py", recursive=True + ) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py index e9eb1cbfa5db..c1715136d645 100644 --- a/language/snippets/classify_text/noxfile.py +++ b/language/snippets/classify_text/noxfile.py @@ -207,8 +207,10 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( + 
"**/test_*.py", recursive=True + ) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py index e9eb1cbfa5db..c1715136d645 100644 --- a/language/snippets/cloud-client/v1/noxfile.py +++ b/language/snippets/cloud-client/v1/noxfile.py @@ -207,8 +207,10 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( + "**/test_*.py", recursive=True + ) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py index e9eb1cbfa5db..c1715136d645 100644 --- a/language/snippets/generated-samples/v1/noxfile.py +++ b/language/snippets/generated-samples/v1/noxfile.py @@ -207,8 +207,10 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( + "**/test_*.py", recursive=True + ) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py index e9eb1cbfa5db..c1715136d645 100644 --- a/language/snippets/sentiment/noxfile.py +++ b/language/snippets/sentiment/noxfile.py @@ -207,8 +207,10 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( + "**/test_*.py", recursive=True + ) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") From 0c5254c4a72f0d6b193f5d9e900421f0e0c72378 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 14 Sep 2022 15:54:19 +0200 Subject: [PATCH 302/323] chore(deps): update dependency google-api-python-client to v2.61.0 (#373) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 0304f99b12b6..6328b5f14727 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.60.0 +google-api-python-client==2.61.0 google-auth==2.11.0 google-auth-httplib2==0.1.0 From 57d64d34a1bbc012cbe10e454ace89e70be85d1b Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 20 Sep 2022 13:40:47 +0200 Subject: [PATCH 303/323] chore(deps): update dependency google-auth to v2.11.1 (#374) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 6328b5f14727..30939e162169 100644 --- a/language/snippets/api/requirements.txt +++ 
b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.61.0 -google-auth==2.11.0 +google-auth==2.11.1 google-auth-httplib2==0.1.0 From 0205f39915762d3139d7e70de953550598e50eb9 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 20 Sep 2022 13:51:02 +0200 Subject: [PATCH 304/323] chore(deps): update dependency google-api-python-client to v2.62.0 (#375) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 30939e162169..3b6591166587 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.61.0 +google-api-python-client==2.62.0 google-auth==2.11.1 google-auth-httplib2==0.1.0 From 523b1f41a608a5a34c1e8038bb52829fab11edc4 Mon Sep 17 00:00:00 2001 From: wizeng23 Date: Tue, 27 Sep 2022 07:58:25 -0700 Subject: [PATCH 305/323] docs: update classification sample to use v2 model (#378) * docs: update classification sample to use v2 model * fix: update classification test to use v2 model Co-authored-by: Anthonios Partheniou --- language/v1/language_classify_text.py | 15 ++++++++++++--- language/v1/test/classifying_content.test.yaml | 10 +++++----- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/language/v1/language_classify_text.py b/language/v1/language_classify_text.py index 6fe2aaa4b60d..fe2b56719cab 100644 --- a/language/v1/language_classify_text.py +++ b/language/v1/language_classify_text.py @@ -32,12 +32,12 @@ def sample_classify_text(text_content): Classifying Content in a String Args: - text_content The text content to analyze. Must include at least 20 words. + text_content The text content to analyze. """ client = language_v1.LanguageServiceClient() - # text_content = 'That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows.' + # text_content = "That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows." # Available types: PLAIN_TEXT, HTML type_ = language_v1.Document.Type.PLAIN_TEXT @@ -48,7 +48,16 @@ def sample_classify_text(text_content): language = "en" document = {"content": text_content, "type_": type_, "language": language} - response = client.classify_text(request = {'document': document}) + content_categories_version = ( + language_v1.ClassificationModelOptions.V2Model.ContentCategoriesVersion.V2) + response = client.classify_text(request = { + "document": document, + "classification_model_options": { + "v2_model": { + "content_categories_version": content_categories_version + } + } + }) # Loop through classified categories returned from the API for category in response.categories: # Get the name of the category representing the document. diff --git a/language/v1/test/classifying_content.test.yaml b/language/v1/test/classifying_content.test.yaml index 5cfc76696b25..4b5f121d7d14 100644 --- a/language/v1/test/classifying_content.test.yaml +++ b/language/v1/test/classifying_content.test.yaml @@ -16,15 +16,15 @@ test: - name: language_classify_text - Classifying Content of a text string (*custom value*) spec: - # Custom value: "Let's drink coffee and eat bagels at a coffee shop. I want muffins, croisants, coffee and baked goods." + # Custom value: "Dungeons and dragons and loot, oh my!" - call: sample: language_classify_text params: - text_content: {literal: "Let's drink coffee and eat bagels at a coffee shop. 
I want muffins, croisants, coffee and baked goods."} + text_content: {literal: "Dungeons and dragons and loot, oh my!"} - assert_contains_any: - - {literal: "Food"} - - {literal: "Drink"} - - {literal: "Coffee"} + - {literal: "Games"} + - {literal: "Roleplaying"} + - {literal: "Computer"} - name: language_classify_gcs - Classifying Content of text file in GCS (default value) spec: From 7bf432c8d2b06becf936f7749ee6f1e19a81cab3 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 3 Oct 2022 19:06:35 +0200 Subject: [PATCH 306/323] chore(deps): update all dependencies (#382) Co-authored-by: Anthonios Partheniou --- language/snippets/api/requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 3b6591166587..fca4e863c90b 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.62.0 -google-auth==2.11.1 +google-api-python-client==2.63.0 +google-auth==2.12.0 google-auth-httplib2==0.1.0 From c88316736b7fef2997ca0c49a394c43f94ef47e6 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 4 Oct 2022 02:53:52 +0200 Subject: [PATCH 307/323] chore(deps): update dependency google-cloud-language to v2.6.0 (#383) --- language/snippets/classify_text/requirements.txt | 2 +- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 954099fff0e8..933b1bf8469e 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.5.2 +google-cloud-language==2.6.0 numpy==1.23.3; python_version > '3.7' numpy===1.21.4; python_version == '3.7' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index db52d214f587..b9a49269b65e 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.5.2 +google-cloud-language==2.6.0 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index db52d214f587..b9a49269b65e 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.5.2 +google-cloud-language==2.6.0 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index db52d214f587..b9a49269b65e 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.5.2 +google-cloud-language==2.6.0 From ce8bf8e9dc541cd4fdfec9a22ade3db6c3bb812d Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 4 Oct 2022 20:19:14 +0200 Subject: [PATCH 308/323] chore(deps): update dependency google-api-python-client to v2.64.0 (#384) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index fca4e863c90b..3cc81ab83d54 100644 --- 
a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.63.0 +google-api-python-client==2.64.0 google-auth==2.12.0 google-auth-httplib2==0.1.0 From 0df3149a419d868ff925f0c5fa360ebfb16ed9a0 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 18 Oct 2022 15:23:47 +0200 Subject: [PATCH 309/323] chore(deps): update all dependencies (#387) --- language/snippets/api/requirements.txt | 2 +- language/snippets/classify_text/requirements.txt | 4 ++-- language/snippets/cloud-client/v1/requirements.txt | 2 +- language/snippets/generated-samples/v1/requirements.txt | 2 +- language/snippets/sentiment/requirements.txt | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 3cc81ab83d54..d5bb9dbfc067 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.64.0 -google-auth==2.12.0 +google-auth==2.13.0 google-auth-httplib2==0.1.0 diff --git a/language/snippets/classify_text/requirements.txt b/language/snippets/classify_text/requirements.txt index 933b1bf8469e..f53284c69dd3 100644 --- a/language/snippets/classify_text/requirements.txt +++ b/language/snippets/classify_text/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-language==2.6.0 -numpy==1.23.3; python_version > '3.7' +google-cloud-language==2.6.1 +numpy==1.23.4; python_version > '3.7' numpy===1.21.4; python_version == '3.7' diff --git a/language/snippets/cloud-client/v1/requirements.txt b/language/snippets/cloud-client/v1/requirements.txt index b9a49269b65e..c3458e3d62f8 100644 --- a/language/snippets/cloud-client/v1/requirements.txt +++ b/language/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.6.0 +google-cloud-language==2.6.1 diff --git a/language/snippets/generated-samples/v1/requirements.txt b/language/snippets/generated-samples/v1/requirements.txt index b9a49269b65e..c3458e3d62f8 100644 --- a/language/snippets/generated-samples/v1/requirements.txt +++ b/language/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.6.0 +google-cloud-language==2.6.1 diff --git a/language/snippets/sentiment/requirements.txt b/language/snippets/sentiment/requirements.txt index b9a49269b65e..c3458e3d62f8 100644 --- a/language/snippets/sentiment/requirements.txt +++ b/language/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==2.6.0 +google-cloud-language==2.6.1 From 422c0f1934a5878a6d3f5222098446a761407a99 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 19 Oct 2022 16:04:27 +0200 Subject: [PATCH 310/323] chore(deps): update dependency google-api-python-client to v2.65.0 (#389) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index d5bb9dbfc067..8d79b99494c0 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==2.64.0 +google-api-python-client==2.65.0 google-auth==2.13.0 google-auth-httplib2==0.1.0 From 3c4810e64b769c291c9b21b686fcc9ed54ebbabe Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 26 Oct 2022 12:48:50 +0200 Subject: [PATCH 311/323] chore(deps): update dependency pytest to v7.2.0 (#390) --- language/snippets/api/requirements-test.txt | 2 +- 
language/snippets/classify_text/requirements-test.txt | 2 +- language/snippets/cloud-client/v1/requirements-test.txt | 2 +- language/snippets/generated-samples/v1/requirements-test.txt | 2 +- language/snippets/sentiment/requirements-test.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/language/snippets/api/requirements-test.txt b/language/snippets/api/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/language/snippets/api/requirements-test.txt +++ b/language/snippets/api/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 diff --git a/language/snippets/classify_text/requirements-test.txt b/language/snippets/classify_text/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/language/snippets/classify_text/requirements-test.txt +++ b/language/snippets/classify_text/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 diff --git a/language/snippets/cloud-client/v1/requirements-test.txt b/language/snippets/cloud-client/v1/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/language/snippets/cloud-client/v1/requirements-test.txt +++ b/language/snippets/cloud-client/v1/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 diff --git a/language/snippets/generated-samples/v1/requirements-test.txt b/language/snippets/generated-samples/v1/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/language/snippets/generated-samples/v1/requirements-test.txt +++ b/language/snippets/generated-samples/v1/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 diff --git a/language/snippets/sentiment/requirements-test.txt b/language/snippets/sentiment/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/language/snippets/sentiment/requirements-test.txt +++ b/language/snippets/sentiment/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 From c3b44f41a5ef74509c2ed9d674f7dbb67e0605b8 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 1 Nov 2022 14:12:01 +0100 Subject: [PATCH 312/323] chore(deps): update dependency google-auth to v2.14.0 (#395) --- language/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/api/requirements.txt b/language/snippets/api/requirements.txt index 8d79b99494c0..8cef7de4e373 100644 --- a/language/snippets/api/requirements.txt +++ b/language/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==2.65.0 -google-auth==2.13.0 +google-auth==2.14.0 google-auth-httplib2==0.1.0 From 7a9d480641cf62a24b537929419e4b9c75b5145e Mon Sep 17 00:00:00 2001 From: Maciej Strzelczyk Date: Fri, 18 Nov 2022 13:51:46 +0100 Subject: [PATCH 313/323] Removing noxfiles + adding requirements.txt --- language/snippets/api/noxfile.py | 312 ------------------ language/snippets/classify_text/noxfile.py | 312 ------------------ language/snippets/cloud-client/v1/noxfile.py | 312 ------------------ .../snippets/generated-samples/v1/noxfile.py | 312 ------------------ language/snippets/sentiment/noxfile.py | 312 ------------------ language/v1/requirements-test.txt | 1 + language/v1/requirements.txt | 1 + 7 files changed, 2 insertions(+), 1560 deletions(-) delete mode 100644 language/snippets/api/noxfile.py delete mode 100644 language/snippets/classify_text/noxfile.py delete mode 100644 language/snippets/cloud-client/v1/noxfile.py delete mode 100644 language/snippets/generated-samples/v1/noxfile.py delete mode 100644 language/snippets/sentiment/noxfile.py create mode 100644 
language/v1/requirements-test.txt create mode 100644 language/v1/requirements.txt diff --git a/language/snippets/api/noxfile.py b/language/snippets/api/noxfile.py deleted file mode 100644 index c1715136d645..000000000000 --- a/language/snippets/api/noxfile.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import glob -import os -from pathlib import Path -import sys -from typing import Callable, Dict, List, Optional - -import nox - -# WARNING - WARNING - WARNING - WARNING - WARNING -# WARNING - WARNING - WARNING - WARNING - WARNING -# DO NOT EDIT THIS FILE EVER! -# WARNING - WARNING - WARNING - WARNING - WARNING -# WARNING - WARNING - WARNING - WARNING - WARNING - -BLACK_VERSION = "black==22.3.0" -ISORT_VERSION = "isort==5.10.1" - -# Copy `noxfile_config.py` to your directory and modify it instead. - -# `TEST_CONFIG` dict is a configuration hook that allows users to -# modify the test configurations. The values here should be in sync -# with `noxfile_config.py`. Users will copy `noxfile_config.py` into -# their directory and modify it. - -TEST_CONFIG = { - # You can opt out from the test for specific Python versions. - "ignored_versions": [], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": False, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. - "envs": {}, -} - - -try: - # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") - from noxfile_config import TEST_CONFIG_OVERRIDE -except ImportError as e: - print("No user noxfile_config found: detail: {}".format(e)) - TEST_CONFIG_OVERRIDE = {} - -# Update the TEST_CONFIG with the user supplied values. -TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) - - -def get_pytest_env_vars() -> Dict[str, str]: - """Returns a dict for pytest invocation.""" - ret = {} - - # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] - # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] - - # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) - return ret - - -# DO NOT EDIT - automatically generated. -# All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] - -# Any default versions that should be ignored. 
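-# (TESTED_VERSIONS below is ALL_VERSIONS with these ignored entries filtered out.)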
-IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] - -TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) - -INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( - "True", - "true", -) - -# Error if a python version is missing -nox.options.error_on_missing_interpreters = True - -# -# Style Checks -# - - -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - -# Linting with flake8. -# -# We ignore the following rules: -# E203: whitespace before ‘:’ -# E266: too many leading ‘#’ for block comment -# E501: line too long -# I202: Additional newline in a section of imports -# -# We also need to specify the rules which are ignored by default: -# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] -FLAKE8_COMMON_ARGS = [ - "--show-source", - "--builtin=gettext", - "--max-complexity=20", - "--import-order-style=google", - "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", - "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", - "--max-line-length=88", -] - - -@nox.session -def lint(session: nox.sessions.Session) -> None: - if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") - else: - session.install("flake8", "flake8-import-order", "flake8-annotations") - - local_names = _determine_local_import_names(".") - args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), - ".", - ] - session.run("flake8", *args) - - -# -# Black -# - - -@nox.session -def blacken(session: nox.sessions.Session) -> None: - """Run black. Format code to uniform standard.""" - session.install(BLACK_VERSION) - python_files = [path for path in os.listdir(".") if path.endswith(".py")] - - session.run("black", *python_files) - - -# -# format = isort + black -# - - -@nox.session -def format(session: nox.sessions.Session) -> None: - """ - Run isort to sort imports. Then run black - to format code to uniform standard. - """ - session.install(BLACK_VERSION, ISORT_VERSION) - python_files = [path for path in os.listdir(".") if path.endswith(".py")] - - # Use the --fss option to sort imports using strict alphabetical order. 
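- # (--fss makes isort order plain "import x" and "from x import y" lines in one alphabetical sequence rather than grouping them by import form.)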
- # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections - session.run("isort", "--fss", *python_files) - session.run("black", *python_files) - - -# -# Sample Tests -# - - -PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] - - -def _session_tests( - session: nox.sessions.Session, post_install: Callable = None -) -> None: - # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( - "**/test_*.py", recursive=True - ) - test_list.extend(glob.glob("**/tests", recursive=True)) - - if len(test_list) == 0: - print("No tests found, skipping directory.") - return - - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - concurrent_args = [] - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - with open("requirements.txt") as rfile: - packages = rfile.read() - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - with open("requirements-test.txt") as rtfile: - packages += rtfile.read() - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - if "pytest-parallel" in packages: - concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) - elif "pytest-xdist" in packages: - concurrent_args.extend(["-n", "auto"]) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) - - -@nox.session(python=ALL_VERSIONS) -def py(session: nox.sessions.Session) -> None: - """Runs py.test for a sample using the specified version of Python.""" - if session.python in TESTED_VERSIONS: - _session_tests(session) - else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) - - -# -# Readmegen -# - - -def _get_repo_root() -> Optional[str]: - """Returns the root folder of the project.""" - # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
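- # (Walks upward from the current working directory, returning the first parent that contains a .git directory or a setup.py file.)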
- p = Path(os.getcwd()) - for i in range(10): - if p is None: - break - if Path(p / ".git").exists(): - return str(p) - # .git is not available in repos cloned via Cloud Build - # setup.py is always in the library's root, so use that instead - # https://github.com/googleapis/synthtool/issues/792 - if Path(p / "setup.py").exists(): - return str(p) - p = p.parent - raise Exception("Unable to detect repository root.") - - -GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) - - -@nox.session -@nox.parametrize("path", GENERATED_READMES) -def readmegen(session: nox.sessions.Session, path: str) -> None: - """(Re-)generates the readme for a sample.""" - session.install("jinja2", "pyyaml") - dir_ = os.path.dirname(path) - - if os.path.exists(os.path.join(dir_, "requirements.txt")): - session.install("-r", os.path.join(dir_, "requirements.txt")) - - in_file = os.path.join(dir_, "README.rst.in") - session.run( - "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file - ) diff --git a/language/snippets/classify_text/noxfile.py b/language/snippets/classify_text/noxfile.py deleted file mode 100644 index c1715136d645..000000000000 --- a/language/snippets/classify_text/noxfile.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import glob -import os -from pathlib import Path -import sys -from typing import Callable, Dict, List, Optional - -import nox - -# WARNING - WARNING - WARNING - WARNING - WARNING -# WARNING - WARNING - WARNING - WARNING - WARNING -# DO NOT EDIT THIS FILE EVER! -# WARNING - WARNING - WARNING - WARNING - WARNING -# WARNING - WARNING - WARNING - WARNING - WARNING - -BLACK_VERSION = "black==22.3.0" -ISORT_VERSION = "isort==5.10.1" - -# Copy `noxfile_config.py` to your directory and modify it instead. - -# `TEST_CONFIG` dict is a configuration hook that allows users to -# modify the test configurations. The values here should be in sync -# with `noxfile_config.py`. Users will copy `noxfile_config.py` into -# their directory and modify it. - -TEST_CONFIG = { - # You can opt out from the test for specific Python versions. - "ignored_versions": [], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": False, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. 
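- # (get_pytest_env_vars() below merges this dict into the environment passed to the pytest run.)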
- "envs": {}, -} - - -try: - # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") - from noxfile_config import TEST_CONFIG_OVERRIDE -except ImportError as e: - print("No user noxfile_config found: detail: {}".format(e)) - TEST_CONFIG_OVERRIDE = {} - -# Update the TEST_CONFIG with the user supplied values. -TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) - - -def get_pytest_env_vars() -> Dict[str, str]: - """Returns a dict for pytest invocation.""" - ret = {} - - # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] - # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] - - # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) - return ret - - -# DO NOT EDIT - automatically generated. -# All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] - -# Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] - -TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) - -INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( - "True", - "true", -) - -# Error if a python version is missing -nox.options.error_on_missing_interpreters = True - -# -# Style Checks -# - - -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - -# Linting with flake8. -# -# We ignore the following rules: -# E203: whitespace before ‘:’ -# E266: too many leading ‘#’ for block comment -# E501: line too long -# I202: Additional newline in a section of imports -# -# We also need to specify the rules which are ignored by default: -# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] -FLAKE8_COMMON_ARGS = [ - "--show-source", - "--builtin=gettext", - "--max-complexity=20", - "--import-order-style=google", - "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", - "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", - "--max-line-length=88", -] - - -@nox.session -def lint(session: nox.sessions.Session) -> None: - if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") - else: - session.install("flake8", "flake8-import-order", "flake8-annotations") - - local_names = _determine_local_import_names(".") - args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), - ".", - ] - session.run("flake8", *args) - - -# -# Black -# - - -@nox.session -def blacken(session: nox.sessions.Session) -> None: - """Run black. Format code to uniform standard.""" - session.install(BLACK_VERSION) - python_files = [path for path in os.listdir(".") if path.endswith(".py")] - - session.run("black", *python_files) - - -# -# format = isort + black -# - - -@nox.session -def format(session: nox.sessions.Session) -> None: - """ - Run isort to sort imports. Then run black - to format code to uniform standard. 
- """ - session.install(BLACK_VERSION, ISORT_VERSION) - python_files = [path for path in os.listdir(".") if path.endswith(".py")] - - # Use the --fss option to sort imports using strict alphabetical order. - # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections - session.run("isort", "--fss", *python_files) - session.run("black", *python_files) - - -# -# Sample Tests -# - - -PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] - - -def _session_tests( - session: nox.sessions.Session, post_install: Callable = None -) -> None: - # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( - "**/test_*.py", recursive=True - ) - test_list.extend(glob.glob("**/tests", recursive=True)) - - if len(test_list) == 0: - print("No tests found, skipping directory.") - return - - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - concurrent_args = [] - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - with open("requirements.txt") as rfile: - packages = rfile.read() - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - with open("requirements-test.txt") as rtfile: - packages += rtfile.read() - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - if "pytest-parallel" in packages: - concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) - elif "pytest-xdist" in packages: - concurrent_args.extend(["-n", "auto"]) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) - - -@nox.session(python=ALL_VERSIONS) -def py(session: nox.sessions.Session) -> None: - """Runs py.test for a sample using the specified version of Python.""" - if session.python in TESTED_VERSIONS: - _session_tests(session) - else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) - - -# -# Readmegen -# - - -def _get_repo_root() -> Optional[str]: - """Returns the root folder of the project.""" - # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
- p = Path(os.getcwd()) - for i in range(10): - if p is None: - break - if Path(p / ".git").exists(): - return str(p) - # .git is not available in repos cloned via Cloud Build - # setup.py is always in the library's root, so use that instead - # https://github.com/googleapis/synthtool/issues/792 - if Path(p / "setup.py").exists(): - return str(p) - p = p.parent - raise Exception("Unable to detect repository root.") - - -GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) - - -@nox.session -@nox.parametrize("path", GENERATED_READMES) -def readmegen(session: nox.sessions.Session, path: str) -> None: - """(Re-)generates the readme for a sample.""" - session.install("jinja2", "pyyaml") - dir_ = os.path.dirname(path) - - if os.path.exists(os.path.join(dir_, "requirements.txt")): - session.install("-r", os.path.join(dir_, "requirements.txt")) - - in_file = os.path.join(dir_, "README.rst.in") - session.run( - "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file - ) diff --git a/language/snippets/cloud-client/v1/noxfile.py b/language/snippets/cloud-client/v1/noxfile.py deleted file mode 100644 index c1715136d645..000000000000 --- a/language/snippets/cloud-client/v1/noxfile.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import glob -import os -from pathlib import Path -import sys -from typing import Callable, Dict, List, Optional - -import nox - -# WARNING - WARNING - WARNING - WARNING - WARNING -# WARNING - WARNING - WARNING - WARNING - WARNING -# DO NOT EDIT THIS FILE EVER! -# WARNING - WARNING - WARNING - WARNING - WARNING -# WARNING - WARNING - WARNING - WARNING - WARNING - -BLACK_VERSION = "black==22.3.0" -ISORT_VERSION = "isort==5.10.1" - -# Copy `noxfile_config.py` to your directory and modify it instead. - -# `TEST_CONFIG` dict is a configuration hook that allows users to -# modify the test configurations. The values here should be in sync -# with `noxfile_config.py`. Users will copy `noxfile_config.py` into -# their directory and modify it. - -TEST_CONFIG = { - # You can opt out from the test for specific Python versions. - "ignored_versions": [], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": False, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. 
- "envs": {}, -} - - -try: - # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") - from noxfile_config import TEST_CONFIG_OVERRIDE -except ImportError as e: - print("No user noxfile_config found: detail: {}".format(e)) - TEST_CONFIG_OVERRIDE = {} - -# Update the TEST_CONFIG with the user supplied values. -TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) - - -def get_pytest_env_vars() -> Dict[str, str]: - """Returns a dict for pytest invocation.""" - ret = {} - - # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] - # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] - - # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) - return ret - - -# DO NOT EDIT - automatically generated. -# All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] - -# Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] - -TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) - -INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( - "True", - "true", -) - -# Error if a python version is missing -nox.options.error_on_missing_interpreters = True - -# -# Style Checks -# - - -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - -# Linting with flake8. -# -# We ignore the following rules: -# E203: whitespace before ‘:’ -# E266: too many leading ‘#’ for block comment -# E501: line too long -# I202: Additional newline in a section of imports -# -# We also need to specify the rules which are ignored by default: -# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] -FLAKE8_COMMON_ARGS = [ - "--show-source", - "--builtin=gettext", - "--max-complexity=20", - "--import-order-style=google", - "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", - "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", - "--max-line-length=88", -] - - -@nox.session -def lint(session: nox.sessions.Session) -> None: - if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") - else: - session.install("flake8", "flake8-import-order", "flake8-annotations") - - local_names = _determine_local_import_names(".") - args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), - ".", - ] - session.run("flake8", *args) - - -# -# Black -# - - -@nox.session -def blacken(session: nox.sessions.Session) -> None: - """Run black. Format code to uniform standard.""" - session.install(BLACK_VERSION) - python_files = [path for path in os.listdir(".") if path.endswith(".py")] - - session.run("black", *python_files) - - -# -# format = isort + black -# - - -@nox.session -def format(session: nox.sessions.Session) -> None: - """ - Run isort to sort imports. Then run black - to format code to uniform standard. 
- """ - session.install(BLACK_VERSION, ISORT_VERSION) - python_files = [path for path in os.listdir(".") if path.endswith(".py")] - - # Use the --fss option to sort imports using strict alphabetical order. - # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections - session.run("isort", "--fss", *python_files) - session.run("black", *python_files) - - -# -# Sample Tests -# - - -PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] - - -def _session_tests( - session: nox.sessions.Session, post_install: Callable = None -) -> None: - # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( - "**/test_*.py", recursive=True - ) - test_list.extend(glob.glob("**/tests", recursive=True)) - - if len(test_list) == 0: - print("No tests found, skipping directory.") - return - - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - concurrent_args = [] - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - with open("requirements.txt") as rfile: - packages = rfile.read() - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - with open("requirements-test.txt") as rtfile: - packages += rtfile.read() - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - if "pytest-parallel" in packages: - concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) - elif "pytest-xdist" in packages: - concurrent_args.extend(["-n", "auto"]) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) - - -@nox.session(python=ALL_VERSIONS) -def py(session: nox.sessions.Session) -> None: - """Runs py.test for a sample using the specified version of Python.""" - if session.python in TESTED_VERSIONS: - _session_tests(session) - else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) - - -# -# Readmegen -# - - -def _get_repo_root() -> Optional[str]: - """Returns the root folder of the project.""" - # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
- p = Path(os.getcwd()) - for i in range(10): - if p is None: - break - if Path(p / ".git").exists(): - return str(p) - # .git is not available in repos cloned via Cloud Build - # setup.py is always in the library's root, so use that instead - # https://github.com/googleapis/synthtool/issues/792 - if Path(p / "setup.py").exists(): - return str(p) - p = p.parent - raise Exception("Unable to detect repository root.") - - -GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) - - -@nox.session -@nox.parametrize("path", GENERATED_READMES) -def readmegen(session: nox.sessions.Session, path: str) -> None: - """(Re-)generates the readme for a sample.""" - session.install("jinja2", "pyyaml") - dir_ = os.path.dirname(path) - - if os.path.exists(os.path.join(dir_, "requirements.txt")): - session.install("-r", os.path.join(dir_, "requirements.txt")) - - in_file = os.path.join(dir_, "README.rst.in") - session.run( - "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file - ) diff --git a/language/snippets/generated-samples/v1/noxfile.py b/language/snippets/generated-samples/v1/noxfile.py deleted file mode 100644 index c1715136d645..000000000000 --- a/language/snippets/generated-samples/v1/noxfile.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import glob -import os -from pathlib import Path -import sys -from typing import Callable, Dict, List, Optional - -import nox - -# WARNING - WARNING - WARNING - WARNING - WARNING -# WARNING - WARNING - WARNING - WARNING - WARNING -# DO NOT EDIT THIS FILE EVER! -# WARNING - WARNING - WARNING - WARNING - WARNING -# WARNING - WARNING - WARNING - WARNING - WARNING - -BLACK_VERSION = "black==22.3.0" -ISORT_VERSION = "isort==5.10.1" - -# Copy `noxfile_config.py` to your directory and modify it instead. - -# `TEST_CONFIG` dict is a configuration hook that allows users to -# modify the test configurations. The values here should be in sync -# with `noxfile_config.py`. Users will copy `noxfile_config.py` into -# their directory and modify it. - -TEST_CONFIG = { - # You can opt out from the test for specific Python versions. - "ignored_versions": [], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": False, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. 
These values will override predefined values. - "envs": {}, -} - - -try: - # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") - from noxfile_config import TEST_CONFIG_OVERRIDE -except ImportError as e: - print("No user noxfile_config found: detail: {}".format(e)) - TEST_CONFIG_OVERRIDE = {} - -# Update the TEST_CONFIG with the user supplied values. -TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) - - -def get_pytest_env_vars() -> Dict[str, str]: - """Returns a dict for pytest invocation.""" - ret = {} - - # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] - # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] - - # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) - return ret - - -# DO NOT EDIT - automatically generated. -# All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] - -# Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] - -TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) - -INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( - "True", - "true", -) - -# Error if a python version is missing -nox.options.error_on_missing_interpreters = True - -# -# Style Checks -# - - -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - -# Linting with flake8. -# -# We ignore the following rules: -# E203: whitespace before ‘:’ -# E266: too many leading ‘#’ for block comment -# E501: line too long -# I202: Additional newline in a section of imports -# -# We also need to specify the rules which are ignored by default: -# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] -FLAKE8_COMMON_ARGS = [ - "--show-source", - "--builtin=gettext", - "--max-complexity=20", - "--import-order-style=google", - "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", - "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", - "--max-line-length=88", -] - - -@nox.session -def lint(session: nox.sessions.Session) -> None: - if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") - else: - session.install("flake8", "flake8-import-order", "flake8-annotations") - - local_names = _determine_local_import_names(".") - args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), - ".", - ] - session.run("flake8", *args) - - -# -# Black -# - - -@nox.session -def blacken(session: nox.sessions.Session) -> None: - """Run black. Format code to uniform standard.""" - session.install(BLACK_VERSION) - python_files = [path for path in os.listdir(".") if path.endswith(".py")] - - session.run("black", *python_files) - - -# -# format = isort + black -# - - -@nox.session -def format(session: nox.sessions.Session) -> None: - """ - Run isort to sort imports. Then run black - to format code to uniform standard. 
- """ - session.install(BLACK_VERSION, ISORT_VERSION) - python_files = [path for path in os.listdir(".") if path.endswith(".py")] - - # Use the --fss option to sort imports using strict alphabetical order. - # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections - session.run("isort", "--fss", *python_files) - session.run("black", *python_files) - - -# -# Sample Tests -# - - -PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] - - -def _session_tests( - session: nox.sessions.Session, post_install: Callable = None -) -> None: - # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( - "**/test_*.py", recursive=True - ) - test_list.extend(glob.glob("**/tests", recursive=True)) - - if len(test_list) == 0: - print("No tests found, skipping directory.") - return - - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - concurrent_args = [] - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - with open("requirements.txt") as rfile: - packages = rfile.read() - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - with open("requirements-test.txt") as rtfile: - packages += rtfile.read() - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - if "pytest-parallel" in packages: - concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) - elif "pytest-xdist" in packages: - concurrent_args.extend(["-n", "auto"]) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) - - -@nox.session(python=ALL_VERSIONS) -def py(session: nox.sessions.Session) -> None: - """Runs py.test for a sample using the specified version of Python.""" - if session.python in TESTED_VERSIONS: - _session_tests(session) - else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) - - -# -# Readmegen -# - - -def _get_repo_root() -> Optional[str]: - """Returns the root folder of the project.""" - # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
- p = Path(os.getcwd()) - for i in range(10): - if p is None: - break - if Path(p / ".git").exists(): - return str(p) - # .git is not available in repos cloned via Cloud Build - # setup.py is always in the library's root, so use that instead - # https://github.com/googleapis/synthtool/issues/792 - if Path(p / "setup.py").exists(): - return str(p) - p = p.parent - raise Exception("Unable to detect repository root.") - - -GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) - - -@nox.session -@nox.parametrize("path", GENERATED_READMES) -def readmegen(session: nox.sessions.Session, path: str) -> None: - """(Re-)generates the readme for a sample.""" - session.install("jinja2", "pyyaml") - dir_ = os.path.dirname(path) - - if os.path.exists(os.path.join(dir_, "requirements.txt")): - session.install("-r", os.path.join(dir_, "requirements.txt")) - - in_file = os.path.join(dir_, "README.rst.in") - session.run( - "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file - ) diff --git a/language/snippets/sentiment/noxfile.py b/language/snippets/sentiment/noxfile.py deleted file mode 100644 index c1715136d645..000000000000 --- a/language/snippets/sentiment/noxfile.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import glob -import os -from pathlib import Path -import sys -from typing import Callable, Dict, List, Optional - -import nox - -# WARNING - WARNING - WARNING - WARNING - WARNING -# WARNING - WARNING - WARNING - WARNING - WARNING -# DO NOT EDIT THIS FILE EVER! -# WARNING - WARNING - WARNING - WARNING - WARNING -# WARNING - WARNING - WARNING - WARNING - WARNING - -BLACK_VERSION = "black==22.3.0" -ISORT_VERSION = "isort==5.10.1" - -# Copy `noxfile_config.py` to your directory and modify it instead. - -# `TEST_CONFIG` dict is a configuration hook that allows users to -# modify the test configurations. The values here should be in sync -# with `noxfile_config.py`. Users will copy `noxfile_config.py` into -# their directory and modify it. - -TEST_CONFIG = { - # You can opt out from the test for specific Python versions. - "ignored_versions": [], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": False, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. 
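The TEST_CONFIG block above is the override hook this shared noxfile exposes: a sample directory can drop in its own noxfile_config.py, and the TEST_CONFIG_OVERRIDE dict imported from it (see the try/import block just below) is merged over these defaults. A minimal sketch of such a file — the keys mirror the defaults shown here, while the concrete values are only illustrative assumptions, not settings taken from this repository:

# noxfile_config.py -- optional, lives next to the sample being tested.
# Values below are illustrative assumptions, not settings from this repo.
TEST_CONFIG_OVERRIDE = {
    # Opt this sample out of Python 3.7 runs.
    "ignored_versions": ["3.7"],
    # Use a build-specific Cloud project, as the comments above suggest.
    "gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
    # Extra non-secret environment variables injected into pytest.
    "envs": {"SAMPLE_FLAG": "true"},
}

Because the noxfile applies this with a shallow TEST_CONFIG.update(...), any key supplied in the override replaces the corresponding default wholesale.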
- "envs": {}, -} - - -try: - # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") - from noxfile_config import TEST_CONFIG_OVERRIDE -except ImportError as e: - print("No user noxfile_config found: detail: {}".format(e)) - TEST_CONFIG_OVERRIDE = {} - -# Update the TEST_CONFIG with the user supplied values. -TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) - - -def get_pytest_env_vars() -> Dict[str, str]: - """Returns a dict for pytest invocation.""" - ret = {} - - # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] - # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] - - # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) - return ret - - -# DO NOT EDIT - automatically generated. -# All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] - -# Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] - -TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) - -INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( - "True", - "true", -) - -# Error if a python version is missing -nox.options.error_on_missing_interpreters = True - -# -# Style Checks -# - - -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - -# Linting with flake8. -# -# We ignore the following rules: -# E203: whitespace before ‘:’ -# E266: too many leading ‘#’ for block comment -# E501: line too long -# I202: Additional newline in a section of imports -# -# We also need to specify the rules which are ignored by default: -# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] -FLAKE8_COMMON_ARGS = [ - "--show-source", - "--builtin=gettext", - "--max-complexity=20", - "--import-order-style=google", - "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", - "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", - "--max-line-length=88", -] - - -@nox.session -def lint(session: nox.sessions.Session) -> None: - if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") - else: - session.install("flake8", "flake8-import-order", "flake8-annotations") - - local_names = _determine_local_import_names(".") - args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), - ".", - ] - session.run("flake8", *args) - - -# -# Black -# - - -@nox.session -def blacken(session: nox.sessions.Session) -> None: - """Run black. Format code to uniform standard.""" - session.install(BLACK_VERSION) - python_files = [path for path in os.listdir(".") if path.endswith(".py")] - - session.run("black", *python_files) - - -# -# format = isort + black -# - - -@nox.session -def format(session: nox.sessions.Session) -> None: - """ - Run isort to sort imports. Then run black - to format code to uniform standard. 
- """ - session.install(BLACK_VERSION, ISORT_VERSION) - python_files = [path for path in os.listdir(".") if path.endswith(".py")] - - # Use the --fss option to sort imports using strict alphabetical order. - # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections - session.run("isort", "--fss", *python_files) - session.run("black", *python_files) - - -# -# Sample Tests -# - - -PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] - - -def _session_tests( - session: nox.sessions.Session, post_install: Callable = None -) -> None: - # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( - "**/test_*.py", recursive=True - ) - test_list.extend(glob.glob("**/tests", recursive=True)) - - if len(test_list) == 0: - print("No tests found, skipping directory.") - return - - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - concurrent_args = [] - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - with open("requirements.txt") as rfile: - packages = rfile.read() - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - with open("requirements-test.txt") as rtfile: - packages += rtfile.read() - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - if "pytest-parallel" in packages: - concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) - elif "pytest-xdist" in packages: - concurrent_args.extend(["-n", "auto"]) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) - - -@nox.session(python=ALL_VERSIONS) -def py(session: nox.sessions.Session) -> None: - """Runs py.test for a sample using the specified version of Python.""" - if session.python in TESTED_VERSIONS: - _session_tests(session) - else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) - - -# -# Readmegen -# - - -def _get_repo_root() -> Optional[str]: - """Returns the root folder of the project.""" - # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
- p = Path(os.getcwd()) - for i in range(10): - if p is None: - break - if Path(p / ".git").exists(): - return str(p) - # .git is not available in repos cloned via Cloud Build - # setup.py is always in the library's root, so use that instead - # https://github.com/googleapis/synthtool/issues/792 - if Path(p / "setup.py").exists(): - return str(p) - p = p.parent - raise Exception("Unable to detect repository root.") - - -GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) - - -@nox.session -@nox.parametrize("path", GENERATED_READMES) -def readmegen(session: nox.sessions.Session, path: str) -> None: - """(Re-)generates the readme for a sample.""" - session.install("jinja2", "pyyaml") - dir_ = os.path.dirname(path) - - if os.path.exists(os.path.join(dir_, "requirements.txt")): - session.install("-r", os.path.join(dir_, "requirements.txt")) - - in_file = os.path.join(dir_, "README.rst.in") - session.run( - "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file - ) diff --git a/language/v1/requirements-test.txt b/language/v1/requirements-test.txt new file mode 100644 index 000000000000..49780e035690 --- /dev/null +++ b/language/v1/requirements-test.txt @@ -0,0 +1 @@ +pytest==7.2.0 diff --git a/language/v1/requirements.txt b/language/v1/requirements.txt new file mode 100644 index 000000000000..c3458e3d62f8 --- /dev/null +++ b/language/v1/requirements.txt @@ -0,0 +1 @@ +google-cloud-language==2.6.1 From aa1e878f83c3a7886c354ad39616c6a8d32a25f4 Mon Sep 17 00:00:00 2001 From: Maciej Strzelczyk Date: Fri, 18 Nov 2022 14:45:30 +0100 Subject: [PATCH 314/323] Adding CODEOWNERS and blunderbuss config --- .github/CODEOWNERS | 3 ++- .github/blunderbuss.yml | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 0458dce6e0af..998ddd70b7f5 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -52,6 +52,7 @@ /jobs/**/* @GoogleCloudPlatform/python-samples-reviewers /kubernetes_engine/**/* @GoogleCloudPlatform/python-samples-reviewers /kubernetes_engine/django_tutorial/**/* @glasnt @GoogleCloudPlatform/python-samples-reviewers +/language/**/* @googleapis/python-samples-reviewers @googleapis/cdpe-cloudai /media_cdn/**/* @justin-mp @msampathkumar @GoogleCloudPlatform/python-samples-reviewers /memorystore/**/* @GoogleCloudPlatform/python-samples-reviewers /ml_engine/**/* @ivanmkc @GoogleCloudPlatform/python-samples-reviewers @@ -78,4 +79,4 @@ /workflows/**/* @GoogleCloudPlatform/python-samples-reviewers /datacatalog/**/* @GoogleCloudPlatform/python-samples-reviewers /kms/**/** @GoogleCloudPlatform/dee-infra @GoogleCloudPlatform/python-samples-reviewers -/dataproc/**/** @GoogleCloudPlatform/cloud-dpes @GoogleCloudPlatform/python-samples-reviewers \ No newline at end of file +/dataproc/**/** @GoogleCloudPlatform/cloud-dpes @GoogleCloudPlatform/python-samples-reviewers diff --git a/.github/blunderbuss.yml b/.github/blunderbuss.yml index 1689daa99926..6372faf3158a 100644 --- a/.github/blunderbuss.yml +++ b/.github/blunderbuss.yml @@ -83,6 +83,10 @@ assign_issues_by: - 'api: cloudiot' to: - GoogleCloudPlatform/api-iot +- labels: + - 'api: language' + to: + - googleapis/cdpe-cloudai - labels: - 'api: ml' to: From 9bdb8d8c8866f51c1dc4e69e65c4e626f1a02744 Mon Sep 17 00:00:00 2001 From: Maciej Strzelczyk Date: Fri, 18 Nov 2022 14:51:33 +0100 Subject: [PATCH 315/323] Fixing license headers --- language/snippets/api/analyze_test.py | 2 +- language/snippets/classify_text/classify_text_tutorial.py | 2 +- 
language/snippets/classify_text/classify_text_tutorial_test.py | 2 +- language/snippets/cloud-client/v1/quickstart.py | 2 +- language/snippets/cloud-client/v1/quickstart_test.py | 2 +- language/snippets/sentiment/sentiment_analysis.py | 2 +- language/snippets/sentiment/sentiment_analysis_test.py | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/language/snippets/api/analyze_test.py b/language/snippets/api/analyze_test.py index da5f0ab0c4d9..0b4a72bbab86 100644 --- a/language/snippets/api/analyze_test.py +++ b/language/snippets/api/analyze_test.py @@ -1,4 +1,4 @@ -# Copyright 2016, Google, Inc. +# Copyright 2016 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/language/snippets/classify_text/classify_text_tutorial.py b/language/snippets/classify_text/classify_text_tutorial.py index de35451dd0f7..11adb1be4d5e 100644 --- a/language/snippets/classify_text/classify_text_tutorial.py +++ b/language/snippets/classify_text/classify_text_tutorial.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2017, Google, Inc. +# Copyright 2017 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/language/snippets/classify_text/classify_text_tutorial_test.py b/language/snippets/classify_text/classify_text_tutorial_test.py index 5859a771ea23..1f095d0ebb64 100644 --- a/language/snippets/classify_text/classify_text_tutorial_test.py +++ b/language/snippets/classify_text/classify_text_tutorial_test.py @@ -1,4 +1,4 @@ -# Copyright 2016, Google, Inc. +# Copyright 2016 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/language/snippets/cloud-client/v1/quickstart.py b/language/snippets/cloud-client/v1/quickstart.py index bbc914d16149..b3532f2d56fc 100644 --- a/language/snippets/cloud-client/v1/quickstart.py +++ b/language/snippets/cloud-client/v1/quickstart.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2016 Google Inc. All Rights Reserved. +# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/language/snippets/cloud-client/v1/quickstart_test.py b/language/snippets/cloud-client/v1/quickstart_test.py index 59b44da841d4..065ff2f7409b 100644 --- a/language/snippets/cloud-client/v1/quickstart_test.py +++ b/language/snippets/cloud-client/v1/quickstart_test.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google Inc. All Rights Reserved. +# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/language/snippets/sentiment/sentiment_analysis.py b/language/snippets/sentiment/sentiment_analysis.py index e82c3a68ae86..ff09520f6fd1 100644 --- a/language/snippets/sentiment/sentiment_analysis.py +++ b/language/snippets/sentiment/sentiment_analysis.py @@ -1,4 +1,4 @@ -# Copyright 2016, Google, Inc. +# Copyright 2016 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at diff --git a/language/snippets/sentiment/sentiment_analysis_test.py b/language/snippets/sentiment/sentiment_analysis_test.py index 845e842f7517..7fc60c79cc6d 100644 --- a/language/snippets/sentiment/sentiment_analysis_test.py +++ b/language/snippets/sentiment/sentiment_analysis_test.py @@ -1,4 +1,4 @@ -# Copyright 2016, Google, Inc. +# Copyright 2016# [END job_discovery_batch_ # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at From c71cbf19f2a40c76a76a9c5bf1cf50ce83807c82 Mon Sep 17 00:00:00 2001 From: Maciej Strzelczyk Date: Fri, 18 Nov 2022 15:14:02 +0100 Subject: [PATCH 316/323] Fixing copywrite header --- language/snippets/sentiment/sentiment_analysis_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/language/snippets/sentiment/sentiment_analysis_test.py b/language/snippets/sentiment/sentiment_analysis_test.py index 7fc60c79cc6d..14718b20042a 100644 --- a/language/snippets/sentiment/sentiment_analysis_test.py +++ b/language/snippets/sentiment/sentiment_analysis_test.py @@ -1,4 +1,4 @@ -# Copyright 2016# [END job_discovery_batch_ +# Copyright 2016 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at From 5df4e95d5f140b20c1c50acc6517edc8dcb7bfaf Mon Sep 17 00:00:00 2001 From: Maciej Strzelczyk Date: Fri, 18 Nov 2022 15:16:58 +0100 Subject: [PATCH 317/323] Adjusting header checker for language samples --- .github/header-checker-lint.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/header-checker-lint.yml b/.github/header-checker-lint.yml index 9fba9f2ee475..771dc07e22f4 100644 --- a/.github/header-checker-lint.yml +++ b/.github/header-checker-lint.yml @@ -18,6 +18,8 @@ ignoreFiles: - "**/ghcnd-stations.txt" - "texttospeech/snippets/resources/example.txt" - "texttospeech/snippets/resources/hello.txt" + - "language/**/*.txt" + - "language/v1/test/*.yaml" ignoreLicenseYear: true From a2cbc192f5ee2eb3789febf0ebaa13c4d6a16351 Mon Sep 17 00:00:00 2001 From: Maciej Strzelczyk Date: Fri, 18 Nov 2022 16:17:41 +0100 Subject: [PATCH 318/323] Lint fix --- language/v1/language_classify_gcs.py | 13 +++++-- language/v1/language_classify_text.py | 22 ++++++----- language/v1/language_entities_gcs.py | 27 ++++++++----- language/v1/language_entities_text.py | 21 ++++++---- language/v1/language_entity_sentiment_gcs.py | 31 +++++++++------ language/v1/language_entity_sentiment_text.py | 25 +++++++----- language/v1/language_sentiment_gcs.py | 25 +++++++----- language/v1/language_sentiment_text.py | 19 ++++----- language/v1/language_syntax_gcs.py | 39 +++++++++++++------ language/v1/language_syntax_text.py | 33 +++++++++++----- 10 files changed, 163 insertions(+), 92 deletions(-) diff --git a/language/v1/language_classify_gcs.py b/language/v1/language_classify_gcs.py index b357a8aed07e..e0289696fc4b 100644 --- a/language/v1/language_classify_gcs.py +++ b/language/v1/language_classify_gcs.py @@ -27,6 +27,7 @@ # [START language_classify_gcs] from google.cloud import language_v1 + def sample_classify_text(gcs_content_uri): """ Classifying Content in text file stored in Cloud Storage @@ -48,18 +49,22 @@ def sample_classify_text(gcs_content_uri): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = 
{"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language} + document = { + "gcs_content_uri": gcs_content_uri, + "type_": type_, + "language": language, + } - response = client.classify_text(request = {'document': document}) + response = client.classify_text(request={"document": document}) # Loop through classified categories returned from the API for category in response.categories: # Get the name of the category representing the document. # See the predefined taxonomy of categories: # https://cloud.google.com/natural-language/docs/categories - print(u"Category name: {}".format(category.name)) + print("Category name: {}".format(category.name)) # Get the confidence. Number representing how certain the classifier # is that this category represents the provided text. - print(u"Confidence: {}".format(category.confidence)) + print("Confidence: {}".format(category.confidence)) # [END language_classify_gcs] diff --git a/language/v1/language_classify_text.py b/language/v1/language_classify_text.py index fe2b56719cab..8c28342bf655 100644 --- a/language/v1/language_classify_text.py +++ b/language/v1/language_classify_text.py @@ -27,6 +27,7 @@ # [START language_classify_text] from google.cloud import language_v1 + def sample_classify_text(text_content): """ Classifying Content in a String @@ -49,24 +50,25 @@ def sample_classify_text(text_content): document = {"content": text_content, "type_": type_, "language": language} content_categories_version = ( - language_v1.ClassificationModelOptions.V2Model.ContentCategoriesVersion.V2) - response = client.classify_text(request = { - "document": document, - "classification_model_options": { - "v2_model": { - "content_categories_version": content_categories_version - } + language_v1.ClassificationModelOptions.V2Model.ContentCategoriesVersion.V2 + ) + response = client.classify_text( + request={ + "document": document, + "classification_model_options": { + "v2_model": {"content_categories_version": content_categories_version} + }, } - }) + ) # Loop through classified categories returned from the API for category in response.categories: # Get the name of the category representing the document. # See the predefined taxonomy of categories: # https://cloud.google.com/natural-language/docs/categories - print(u"Category name: {}".format(category.name)) + print("Category name: {}".format(category.name)) # Get the confidence. Number representing how certain the classifier # is that this category represents the provided text. 
- print(u"Confidence: {}".format(category.confidence)) + print("Confidence: {}".format(category.confidence)) # [END language_classify_text] diff --git a/language/v1/language_entities_gcs.py b/language/v1/language_entities_gcs.py index 6bdb85772936..7ae0dbbcd0ec 100644 --- a/language/v1/language_entities_gcs.py +++ b/language/v1/language_entities_gcs.py @@ -27,6 +27,7 @@ # [START language_entities_gcs] from google.cloud import language_v1 + def sample_analyze_entities(gcs_content_uri): """ Analyzing Entities in text file stored in Cloud Storage @@ -47,39 +48,47 @@ def sample_analyze_entities(gcs_content_uri): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language} + document = { + "gcs_content_uri": gcs_content_uri, + "type_": type_, + "language": language, + } # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type}) + response = client.analyze_entities( + request={"document": document, "encoding_type": encoding_type} + ) # Loop through entitites returned from the API for entity in response.entities: - print(u"Representative name for the entity: {}".format(entity.name)) + print("Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name)) + print("Entity type: {}".format(language_v1.Entity.Type(entity.type_).name)) # Get the salience score associated with the entity in the [0, 1.0] range - print(u"Salience score: {}".format(entity.salience)) + print("Salience score: {}".format(entity.salience)) # Loop over the metadata associated with entity. For many known entities, # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid). # Some entity types may have additional metadata, e.g. ADDRESS entities # may have metadata for the address street_name, postal_code, et al. for metadata_name, metadata_value in entity.metadata.items(): - print(u"{}: {}".format(metadata_name, metadata_value)) + print("{}: {}".format(metadata_name, metadata_value)) # Loop over the mentions of this entity in the input document. # The API currently supports proper noun mentions. for mention in entity.mentions: - print(u"Mention text: {}".format(mention.text.content)) + print("Mention text: {}".format(mention.text.content)) # Get the mention type, e.g. PROPER for proper noun print( - u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name) + "Mention type: {}".format( + language_v1.EntityMention.Type(mention.type_).name + ) ) # Get the language of the text, which will be the same as # the language specified in the request or, if not specified, # the automatically-detected language. 
- print(u"Language of the text: {}".format(response.language)) + print("Language of the text: {}".format(response.language)) # [END language_entities_gcs] diff --git a/language/v1/language_entities_text.py b/language/v1/language_entities_text.py index 2cce0015d04b..41624bbffad9 100644 --- a/language/v1/language_entities_text.py +++ b/language/v1/language_entities_text.py @@ -27,6 +27,7 @@ # [START language_entities_text] from google.cloud import language_v1 + def sample_analyze_entities(text_content): """ Analyzing Entities in a String @@ -51,39 +52,43 @@ def sample_analyze_entities(text_content): # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type}) + response = client.analyze_entities( + request={"document": document, "encoding_type": encoding_type} + ) # Loop through entitites returned from the API for entity in response.entities: - print(u"Representative name for the entity: {}".format(entity.name)) + print("Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name)) + print("Entity type: {}".format(language_v1.Entity.Type(entity.type_).name)) # Get the salience score associated with the entity in the [0, 1.0] range - print(u"Salience score: {}".format(entity.salience)) + print("Salience score: {}".format(entity.salience)) # Loop over the metadata associated with entity. For many known entities, # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid). # Some entity types may have additional metadata, e.g. ADDRESS entities # may have metadata for the address street_name, postal_code, et al. for metadata_name, metadata_value in entity.metadata.items(): - print(u"{}: {}".format(metadata_name, metadata_value)) + print("{}: {}".format(metadata_name, metadata_value)) # Loop over the mentions of this entity in the input document. # The API currently supports proper noun mentions. for mention in entity.mentions: - print(u"Mention text: {}".format(mention.text.content)) + print("Mention text: {}".format(mention.text.content)) # Get the mention type, e.g. PROPER for proper noun print( - u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name) + "Mention type: {}".format( + language_v1.EntityMention.Type(mention.type_).name + ) ) # Get the language of the text, which will be the same as # the language specified in the request or, if not specified, # the automatically-detected language. 
- print(u"Language of the text: {}".format(response.language)) + print("Language of the text: {}".format(response.language)) # [END language_entities_text] diff --git a/language/v1/language_entity_sentiment_gcs.py b/language/v1/language_entity_sentiment_gcs.py index dba3dc1bb76a..df5eb2806769 100644 --- a/language/v1/language_entity_sentiment_gcs.py +++ b/language/v1/language_entity_sentiment_gcs.py @@ -27,6 +27,7 @@ # [START language_entity_sentiment_gcs] from google.cloud import language_v1 + def sample_analyze_entity_sentiment(gcs_content_uri): """ Analyzing Entity Sentiment in text file stored in Cloud Storage @@ -47,43 +48,51 @@ def sample_analyze_entity_sentiment(gcs_content_uri): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language} + document = { + "gcs_content_uri": gcs_content_uri, + "type_": type_, + "language": language, + } # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_entity_sentiment(request = {'document': document, 'encoding_type': encoding_type}) + response = client.analyze_entity_sentiment( + request={"document": document, "encoding_type": encoding_type} + ) # Loop through entitites returned from the API for entity in response.entities: - print(u"Representative name for the entity: {}".format(entity.name)) + print("Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name)) + print("Entity type: {}".format(language_v1.Entity.Type(entity.type_).name)) # Get the salience score associated with the entity in the [0, 1.0] range - print(u"Salience score: {}".format(entity.salience)) + print("Salience score: {}".format(entity.salience)) # Get the aggregate sentiment expressed for this entity in the provided document. sentiment = entity.sentiment - print(u"Entity sentiment score: {}".format(sentiment.score)) - print(u"Entity sentiment magnitude: {}".format(sentiment.magnitude)) + print("Entity sentiment score: {}".format(sentiment.score)) + print("Entity sentiment magnitude: {}".format(sentiment.magnitude)) # Loop over the metadata associated with entity. For many known entities, # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid). # Some entity types may have additional metadata, e.g. ADDRESS entities # may have metadata for the address street_name, postal_code, et al. for metadata_name, metadata_value in entity.metadata.items(): - print(u"{} = {}".format(metadata_name, metadata_value)) + print("{} = {}".format(metadata_name, metadata_value)) # Loop over the mentions of this entity in the input document. # The API currently supports proper noun mentions. for mention in entity.mentions: - print(u"Mention text: {}".format(mention.text.content)) + print("Mention text: {}".format(mention.text.content)) # Get the mention type, e.g. PROPER for proper noun print( - u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name) + "Mention type: {}".format( + language_v1.EntityMention.Type(mention.type_).name + ) ) # Get the language of the text, which will be the same as # the language specified in the request or, if not specified, # the automatically-detected language. 
- print(u"Language of the text: {}".format(response.language)) + print("Language of the text: {}".format(response.language)) # [END language_entity_sentiment_gcs] diff --git a/language/v1/language_entity_sentiment_text.py b/language/v1/language_entity_sentiment_text.py index 4e1341d52158..27e06f006336 100644 --- a/language/v1/language_entity_sentiment_text.py +++ b/language/v1/language_entity_sentiment_text.py @@ -27,6 +27,7 @@ # [START language_entity_sentiment_text] from google.cloud import language_v1 + def sample_analyze_entity_sentiment(text_content): """ Analyzing Entity Sentiment in a String @@ -51,38 +52,42 @@ def sample_analyze_entity_sentiment(text_content): # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_entity_sentiment(request = {'document': document, 'encoding_type': encoding_type}) + response = client.analyze_entity_sentiment( + request={"document": document, "encoding_type": encoding_type} + ) # Loop through entitites returned from the API for entity in response.entities: - print(u"Representative name for the entity: {}".format(entity.name)) + print("Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name)) + print("Entity type: {}".format(language_v1.Entity.Type(entity.type_).name)) # Get the salience score associated with the entity in the [0, 1.0] range - print(u"Salience score: {}".format(entity.salience)) + print("Salience score: {}".format(entity.salience)) # Get the aggregate sentiment expressed for this entity in the provided document. sentiment = entity.sentiment - print(u"Entity sentiment score: {}".format(sentiment.score)) - print(u"Entity sentiment magnitude: {}".format(sentiment.magnitude)) + print("Entity sentiment score: {}".format(sentiment.score)) + print("Entity sentiment magnitude: {}".format(sentiment.magnitude)) # Loop over the metadata associated with entity. For many known entities, # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid). # Some entity types may have additional metadata, e.g. ADDRESS entities # may have metadata for the address street_name, postal_code, et al. for metadata_name, metadata_value in entity.metadata.items(): - print(u"{} = {}".format(metadata_name, metadata_value)) + print("{} = {}".format(metadata_name, metadata_value)) # Loop over the mentions of this entity in the input document. # The API currently supports proper noun mentions. for mention in entity.mentions: - print(u"Mention text: {}".format(mention.text.content)) + print("Mention text: {}".format(mention.text.content)) # Get the mention type, e.g. PROPER for proper noun print( - u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name) + "Mention type: {}".format( + language_v1.EntityMention.Type(mention.type_).name + ) ) # Get the language of the text, which will be the same as # the language specified in the request or, if not specified, # the automatically-detected language. 
- print(u"Language of the text: {}".format(response.language)) + print("Language of the text: {}".format(response.language)) # [END language_entity_sentiment_text] diff --git a/language/v1/language_sentiment_gcs.py b/language/v1/language_sentiment_gcs.py index f225db1c022d..f297c3867426 100644 --- a/language/v1/language_sentiment_gcs.py +++ b/language/v1/language_sentiment_gcs.py @@ -27,6 +27,7 @@ # [START language_sentiment_gcs] from google.cloud import language_v1 + def sample_analyze_sentiment(gcs_content_uri): """ Analyzing Sentiment in text file stored in Cloud Storage @@ -47,29 +48,33 @@ def sample_analyze_sentiment(gcs_content_uri): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language} + document = { + "gcs_content_uri": gcs_content_uri, + "type_": type_, + "language": language, + } # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_sentiment(request = {'document': document, 'encoding_type': encoding_type}) + response = client.analyze_sentiment( + request={"document": document, "encoding_type": encoding_type} + ) # Get overall sentiment of the input document - print(u"Document sentiment score: {}".format(response.document_sentiment.score)) + print("Document sentiment score: {}".format(response.document_sentiment.score)) print( - u"Document sentiment magnitude: {}".format( - response.document_sentiment.magnitude - ) + "Document sentiment magnitude: {}".format(response.document_sentiment.magnitude) ) # Get sentiment for all sentences in the document for sentence in response.sentences: - print(u"Sentence text: {}".format(sentence.text.content)) - print(u"Sentence sentiment score: {}".format(sentence.sentiment.score)) - print(u"Sentence sentiment magnitude: {}".format(sentence.sentiment.magnitude)) + print("Sentence text: {}".format(sentence.text.content)) + print("Sentence sentiment score: {}".format(sentence.sentiment.score)) + print("Sentence sentiment magnitude: {}".format(sentence.sentiment.magnitude)) # Get the language of the text, which will be the same as # the language specified in the request or, if not specified, # the automatically-detected language. 
- print(u"Language of the text: {}".format(response.language)) + print("Language of the text: {}".format(response.language)) # [END language_sentiment_gcs] diff --git a/language/v1/language_sentiment_text.py b/language/v1/language_sentiment_text.py index d94420a39277..559512d8853f 100644 --- a/language/v1/language_sentiment_text.py +++ b/language/v1/language_sentiment_text.py @@ -27,6 +27,7 @@ # [START language_sentiment_text] from google.cloud import language_v1 + def sample_analyze_sentiment(text_content): """ Analyzing Sentiment in a String @@ -51,24 +52,24 @@ def sample_analyze_sentiment(text_content): # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_sentiment(request = {'document': document, 'encoding_type': encoding_type}) + response = client.analyze_sentiment( + request={"document": document, "encoding_type": encoding_type} + ) # Get overall sentiment of the input document - print(u"Document sentiment score: {}".format(response.document_sentiment.score)) + print("Document sentiment score: {}".format(response.document_sentiment.score)) print( - u"Document sentiment magnitude: {}".format( - response.document_sentiment.magnitude - ) + "Document sentiment magnitude: {}".format(response.document_sentiment.magnitude) ) # Get sentiment for all sentences in the document for sentence in response.sentences: - print(u"Sentence text: {}".format(sentence.text.content)) - print(u"Sentence sentiment score: {}".format(sentence.sentiment.score)) - print(u"Sentence sentiment magnitude: {}".format(sentence.sentiment.magnitude)) + print("Sentence text: {}".format(sentence.text.content)) + print("Sentence sentiment score: {}".format(sentence.sentiment.score)) + print("Sentence sentiment magnitude: {}".format(sentence.sentiment.magnitude)) # Get the language of the text, which will be the same as # the language specified in the request or, if not specified, # the automatically-detected language. - print(u"Language of the text: {}".format(response.language)) + print("Language of the text: {}".format(response.language)) # [END language_sentiment_text] diff --git a/language/v1/language_syntax_gcs.py b/language/v1/language_syntax_gcs.py index 32c64edefa6d..41902f21df50 100644 --- a/language/v1/language_syntax_gcs.py +++ b/language/v1/language_syntax_gcs.py @@ -27,6 +27,7 @@ # [START language_syntax_gcs] from google.cloud import language_v1 + def sample_analyze_syntax(gcs_content_uri): """ Analyzing Syntax in text file stored in Cloud Storage @@ -47,19 +48,25 @@ def sample_analyze_syntax(gcs_content_uri): # For list of supported languages: # https://cloud.google.com/natural-language/docs/languages language = "en" - document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language} + document = { + "gcs_content_uri": gcs_content_uri, + "type_": type_, + "language": language, + } # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_syntax(request = {'document': document, 'encoding_type': encoding_type}) + response = client.analyze_syntax( + request={"document": document, "encoding_type": encoding_type} + ) # Loop through tokens returned from the API for token in response.tokens: # Get the text content of this token. Usually a word or punctuation. 
text = token.text - print(u"Token text: {}".format(text.content)) + print("Token text: {}".format(text.content)) print( - u"Location of this token in overall document: {}".format(text.begin_offset) + "Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. # Part of speech is defined in: @@ -67,31 +74,41 @@ def sample_analyze_syntax(gcs_content_uri): part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. print( - u"Part of Speech tag: {}".format( + "Part of Speech tag: {}".format( language_v1.PartOfSpeech.Tag(part_of_speech.tag).name ) ) # Get the voice, e.g. ACTIVE or PASSIVE - print(u"Voice: {}".format(language_v1.PartOfSpeech.Voice(part_of_speech.voice).name)) + print( + "Voice: {}".format( + language_v1.PartOfSpeech.Voice(part_of_speech.voice).name + ) + ) # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. - print(u"Tense: {}".format(language_v1.PartOfSpeech.Tense(part_of_speech.tense).name)) + print( + "Tense: {}".format( + language_v1.PartOfSpeech.Tense(part_of_speech.tense).name + ) + ) # See API reference for additional Part of Speech information available # Get the lemma of the token. Wikipedia lemma description # https://en.wikipedia.org/wiki/Lemma_(morphology) - print(u"Lemma: {}".format(token.lemma)) + print("Lemma: {}".format(token.lemma)) # Get the dependency tree parse information for this token. # For more information on dependency labels: # http://www.aclweb.org/anthology/P13-2017 dependency_edge = token.dependency_edge - print(u"Head token index: {}".format(dependency_edge.head_token_index)) + print("Head token index: {}".format(dependency_edge.head_token_index)) print( - u"Label: {}".format(language_v1.DependencyEdge.Label(dependency_edge.label).name) + "Label: {}".format( + language_v1.DependencyEdge.Label(dependency_edge.label).name + ) ) # Get the language of the text, which will be the same as # the language specified in the request or, if not specified, # the automatically-detected language. - print(u"Language of the text: {}".format(response.language)) + print("Language of the text: {}".format(response.language)) # [END language_syntax_gcs] diff --git a/language/v1/language_syntax_text.py b/language/v1/language_syntax_text.py index 132c577922bf..044234713c69 100644 --- a/language/v1/language_syntax_text.py +++ b/language/v1/language_syntax_text.py @@ -27,6 +27,7 @@ # [START language_syntax_text] from google.cloud import language_v1 + def sample_analyze_syntax(text_content): """ Analyzing Syntax in a String @@ -51,14 +52,16 @@ def sample_analyze_syntax(text_content): # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_syntax(request = {'document': document, 'encoding_type': encoding_type}) + response = client.analyze_syntax( + request={"document": document, "encoding_type": encoding_type} + ) # Loop through tokens returned from the API for token in response.tokens: # Get the text content of this token. Usually a word or punctuation. text = token.text - print(u"Token text: {}".format(text.content)) + print("Token text: {}".format(text.content)) print( - u"Location of this token in overall document: {}".format(text.begin_offset) + "Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. 
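One idiom recurs throughout the syntax samples: each token carries raw enum integers, and every one is wrapped back into the matching language_v1 enum class to obtain a readable name. In isolation, that idiom looks like:

from google.cloud import language_v1


def describe_token(token):
    # token: one element of response.tokens from analyze_syntax.
    pos = token.part_of_speech
    print(token.text.content)
    print("  tag:  ", language_v1.PartOfSpeech.Tag(pos.tag).name)      # e.g. NOUN
    print("  voice:", language_v1.PartOfSpeech.Voice(pos.voice).name)  # e.g. ACTIVE
    print("  tense:", language_v1.PartOfSpeech.Tense(pos.tense).name)  # e.g. PRESENT
    print("  lemma:", token.lemma)
    print("  label:",
          language_v1.DependencyEdge.Label(token.dependency_edge.label).name)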
# Part of speech is defined in: @@ -66,31 +69,41 @@ def sample_analyze_syntax(text_content): part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. print( - u"Part of Speech tag: {}".format( + "Part of Speech tag: {}".format( language_v1.PartOfSpeech.Tag(part_of_speech.tag).name ) ) # Get the voice, e.g. ACTIVE or PASSIVE - print(u"Voice: {}".format(language_v1.PartOfSpeech.Voice(part_of_speech.voice).name)) + print( + "Voice: {}".format( + language_v1.PartOfSpeech.Voice(part_of_speech.voice).name + ) + ) # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. - print(u"Tense: {}".format(language_v1.PartOfSpeech.Tense(part_of_speech.tense).name)) + print( + "Tense: {}".format( + language_v1.PartOfSpeech.Tense(part_of_speech.tense).name + ) + ) # See API reference for additional Part of Speech information available # Get the lemma of the token. Wikipedia lemma description # https://en.wikipedia.org/wiki/Lemma_(morphology) - print(u"Lemma: {}".format(token.lemma)) + print("Lemma: {}".format(token.lemma)) # Get the dependency tree parse information for this token. # For more information on dependency labels: # http://www.aclweb.org/anthology/P13-2017 dependency_edge = token.dependency_edge - print(u"Head token index: {}".format(dependency_edge.head_token_index)) + print("Head token index: {}".format(dependency_edge.head_token_index)) print( - u"Label: {}".format(language_v1.DependencyEdge.Label(dependency_edge.label).name) + "Label: {}".format( + language_v1.DependencyEdge.Label(dependency_edge.label).name + ) ) # Get the language of the text, which will be the same as # the language specified in the request or, if not specified, # the automatically-detected language. - print(u"Language of the text: {}".format(response.language)) + print("Language of the text: {}".format(response.language)) # [END language_syntax_text] From 42995b9cb5ddf5cc84f578e394c28e9e2dfcd139 Mon Sep 17 00:00:00 2001 From: Dan Lee <71398022+dandhlee@users.noreply.github.com> Date: Tue, 22 Nov 2022 02:41:53 -0600 Subject: [PATCH 319/323] Update .github/header-checker-lint.yml --- .github/header-checker-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/header-checker-lint.yml b/.github/header-checker-lint.yml index 771dc07e22f4..cefe36c6d0b0 100644 --- a/.github/header-checker-lint.yml +++ b/.github/header-checker-lint.yml @@ -18,7 +18,7 @@ ignoreFiles: - "**/ghcnd-stations.txt" - "texttospeech/snippets/resources/example.txt" - "texttospeech/snippets/resources/hello.txt" - - "language/**/*.txt" + - "language/**/resources/*.txt" - "language/v1/test/*.yaml" ignoreLicenseYear: true From 60ab1447ed5c55846d6db5048ba0ae8639d1d839 Mon Sep 17 00:00:00 2001 From: Dan Lee <71398022+dandhlee@users.noreply.github.com> Date: Tue, 22 Nov 2022 02:43:36 -0600 Subject: [PATCH 320/323] Update .github/header-checker-lint.yml --- .github/header-checker-lint.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/header-checker-lint.yml b/.github/header-checker-lint.yml index cefe36c6d0b0..91381e7451b2 100644 --- a/.github/header-checker-lint.yml +++ b/.github/header-checker-lint.yml @@ -19,6 +19,7 @@ ignoreFiles: - "texttospeech/snippets/resources/example.txt" - "texttospeech/snippets/resources/hello.txt" - "language/**/resources/*.txt" + - "language/snippets/classify_text/resources/texts/*.txt" - "language/v1/test/*.yaml" ignoreLicenseYear: true From 14f760df0d686602a689b0e3ec5f192a03ea27b5 Mon Sep 17 00:00:00 2001 From: Maciej Strzelczyk Date: Tue, 22 Nov 2022 14:51:26 
+0100 Subject: [PATCH 321/323] Adding copyright headers to YAML files --- .github/header-checker-lint.yml | 1 - language/v1/test/analyzing_entities.test.yaml | 13 +++++++++++++ .../v1/test/analyzing_entity_sentiment.test.yaml | 13 +++++++++++++ language/v1/test/analyzing_sentiment.test.yaml | 13 +++++++++++++ language/v1/test/analyzing_syntax.test.yaml | 13 +++++++++++++ language/v1/test/classifying_content.test.yaml | 13 +++++++++++++ language/v1/test/samples.manifest.yaml | 13 +++++++++++++ 7 files changed, 78 insertions(+), 1 deletion(-) diff --git a/.github/header-checker-lint.yml b/.github/header-checker-lint.yml index 91381e7451b2..0dcaff7d0df6 100644 --- a/.github/header-checker-lint.yml +++ b/.github/header-checker-lint.yml @@ -20,7 +20,6 @@ ignoreFiles: - "texttospeech/snippets/resources/hello.txt" - "language/**/resources/*.txt" - "language/snippets/classify_text/resources/texts/*.txt" - - "language/v1/test/*.yaml" ignoreLicenseYear: true diff --git a/language/v1/test/analyzing_entities.test.yaml b/language/v1/test/analyzing_entities.test.yaml index 5fafd01eaa89..98765cb479ed 100644 --- a/language/v1/test/analyzing_entities.test.yaml +++ b/language/v1/test/analyzing_entities.test.yaml @@ -1,3 +1,16 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. type: test/samples schema_version: 1 test: diff --git a/language/v1/test/analyzing_entity_sentiment.test.yaml b/language/v1/test/analyzing_entity_sentiment.test.yaml index beb8fb4a89a7..41369f978b36 100644 --- a/language/v1/test/analyzing_entity_sentiment.test.yaml +++ b/language/v1/test/analyzing_entity_sentiment.test.yaml @@ -1,3 +1,16 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. type: test/samples schema_version: 1 test: diff --git a/language/v1/test/analyzing_sentiment.test.yaml b/language/v1/test/analyzing_sentiment.test.yaml index 55b5fdcb24d2..73b4410e422c 100644 --- a/language/v1/test/analyzing_sentiment.test.yaml +++ b/language/v1/test/analyzing_sentiment.test.yaml @@ -1,3 +1,16 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. type: test/samples schema_version: 1 test: diff --git a/language/v1/test/analyzing_syntax.test.yaml b/language/v1/test/analyzing_syntax.test.yaml index e89d465c1616..88df9ca040b9 100644 --- a/language/v1/test/analyzing_syntax.test.yaml +++ b/language/v1/test/analyzing_syntax.test.yaml @@ -1,3 +1,16 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. type: test/samples schema_version: 1 test: diff --git a/language/v1/test/classifying_content.test.yaml b/language/v1/test/classifying_content.test.yaml index 4b5f121d7d14..6218462da94e 100644 --- a/language/v1/test/classifying_content.test.yaml +++ b/language/v1/test/classifying_content.test.yaml @@ -1,3 +1,16 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. type: test/samples schema_version: 1 test: diff --git a/language/v1/test/samples.manifest.yaml b/language/v1/test/samples.manifest.yaml index aa270425584c..b60100c40dab 100644 --- a/language/v1/test/samples.manifest.yaml +++ b/language/v1/test/samples.manifest.yaml @@ -1,3 +1,16 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
type: manifest/samples schema_version: 3 base: &common From 2317c60b8334111424cb20c5f951d1e9072b3de5 Mon Sep 17 00:00:00 2001 From: Maciej Strzelczyk Date: Tue, 22 Nov 2022 14:51:47 +0100 Subject: [PATCH 322/323] Changing CODEOWNER for language --- .github/CODEOWNERS | 2 +- .github/blunderbuss.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 65a97c0a2268..8d0e50c0eb32 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -55,7 +55,7 @@ /kms/**/** @GoogleCloudPlatform/dee-infra @GoogleCloudPlatform/python-samples-reviewers /kubernetes_engine/**/* @GoogleCloudPlatform/python-samples-reviewers /kubernetes_engine/django_tutorial/**/* @glasnt @GoogleCloudPlatform/python-samples-reviewers -/language/**/* @googleapis/python-samples-reviewers @googleapis/cdpe-cloudai +/language/**/* @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/python-samples-reviewers /media_cdn/**/* @justin-mp @msampathkumar @GoogleCloudPlatform/python-samples-reviewers /memorystore/**/* @GoogleCloudPlatform/python-samples-reviewers /ml_engine/**/* @ivanmkc @GoogleCloudPlatform/python-samples-reviewers diff --git a/.github/blunderbuss.yml b/.github/blunderbuss.yml index 4a3dfc5eabf4..70830901f191 100644 --- a/.github/blunderbuss.yml +++ b/.github/blunderbuss.yml @@ -88,7 +88,7 @@ assign_issues_by: - labels: - 'api: language' to: - - googleapis/cdpe-cloudai + - GoogleCloudPlatform/dee-data-ai - labels: - 'api: ml' to: From c5eb6ba4ca9479f2e349230a0282d3fde6d3a024 Mon Sep 17 00:00:00 2001 From: Dan Lee <71398022+dandhlee@users.noreply.github.com> Date: Tue, 22 Nov 2022 11:36:24 -0600 Subject: [PATCH 323/323] Apply suggestions from code review --- language/AUTHORING_GUIDE.md | 2 +- language/CONTRIBUTING.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/language/AUTHORING_GUIDE.md b/language/AUTHORING_GUIDE.md index 55c97b32f4c1..8249522ffc2d 100644 --- a/language/AUTHORING_GUIDE.md +++ b/language/AUTHORING_GUIDE.md @@ -1 +1 @@ -See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/AUTHORING_GUIDE.md \ No newline at end of file +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/AUTHORING_GUIDE.md \ No newline at end of file diff --git a/language/CONTRIBUTING.md b/language/CONTRIBUTING.md index 34c882b6f1a3..f5fe2e6baf13 100644 --- a/language/CONTRIBUTING.md +++ b/language/CONTRIBUTING.md @@ -1 +1 @@ -See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/CONTRIBUTING.md \ No newline at end of file +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/CONTRIBUTING.md \ No newline at end of file
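With the series applied, language/v1 pins google-cloud-language==2.6.1 and pytest==7.2.0 in its requirements files. A hypothetical smoke test — named test_*.py so that the glob patterns the removed shared noxfiles used for test discovery would pick it up — might look like:

# test_sentiment_smoke.py -- hypothetical; assumes google-cloud-language==2.6.1
# is installed and application-default credentials are configured.
from google.cloud import language_v1


def test_analyze_sentiment_smoke():
    client = language_v1.LanguageServiceClient()
    document = {
        "content": "Python is a readable language.",
        "type_": language_v1.Document.Type.PLAIN_TEXT,
    }
    response = client.analyze_sentiment(request={"document": document})
    # Sentiment scores are normalized to the [-1.0, 1.0] range.
    assert -1.0 <= response.document_sentiment.score <= 1.0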