From b8d6b067379e0f359f2d779bd94524d5993de597 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Tue, 8 Nov 2016 11:26:04 -0500 Subject: [PATCH 1/3] Add image.detect() for detecting multiple types. --- docs/index.rst | 1 + docs/vision-annotations.rst | 10 ++ docs/vision-usage.rst | 29 +++++ vision/google/cloud/vision/annotations.py | 130 ++++++++++++++++++++++ vision/google/cloud/vision/image.py | 89 +++++---------- vision/unit_tests/test_client.py | 45 ++++++++ 6 files changed, 245 insertions(+), 59 deletions(-) create mode 100644 docs/vision-annotations.rst create mode 100644 vision/google/cloud/vision/annotations.py diff --git a/docs/index.rst b/docs/index.rst index dfd557e17015..77269c0ba84e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -153,6 +153,7 @@ :caption: Vision vision-usage + vision-annotations vision-client vision-color vision-entity diff --git a/docs/vision-annotations.rst b/docs/vision-annotations.rst new file mode 100644 index 000000000000..cae8edcba349 --- /dev/null +++ b/docs/vision-annotations.rst @@ -0,0 +1,10 @@ +Vision Annotations +================== + +Image Annotations +~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.vision.annotations + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/vision-usage.rst b/docs/vision-usage.rst index 77d7a7ad0399..904b8539f3e1 100644 --- a/docs/vision-usage.rst +++ b/docs/vision-usage.rst @@ -31,6 +31,35 @@ or pass in ``credentials`` and ``project`` explicitly. >>> client = vision.Client(project='my-project', credentials=creds) +Manual Detection +~~~~~~~~~~~~~~~~ + +You can call the detection method manually. + +.. code-block:: python + + >>> from google.cloud import vision + >>> from google.cloud.vision.feature import Feature + >>> from google.cloud.vision.feature import FeatureTypes + >>> client = vision.Client() + >>> image = client.image(source_uri='gs://my-test-bucket/image.jpg') + >>> features = [Feature(FeatureTypes.FACE_DETECTION, 5), + ... Feature(FeatureTypes.LOGO_DETECTION, 3)] + >>> annotations = image.detect(features) + >>> len(annotations.faces) + 2 + >>> for face in annotations.faces: + ... print(face.joy_likelihood) + 0.94099093 + 0.54453093 + >>> len(annotations.logos) + 2 + >>> for logo in annotations.logos: + ... print(logo.description) + 'google' + 'github' + + Face Detection ~~~~~~~~~~~~~~ diff --git a/vision/google/cloud/vision/annotations.py b/vision/google/cloud/vision/annotations.py new file mode 100644 index 000000000000..421a9f4a7f7f --- /dev/null +++ b/vision/google/cloud/vision/annotations.py @@ -0,0 +1,130 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
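+#
+# The ``Annotations`` bundle defined in this module maps each feature key in
+# an annotate response (``faceAnnotations``, ``labelAnnotations``,
+# ``logoAnnotations``, ...) onto a matching attribute (``faces``, ``labels``,
+# ``logos``, ...), so a single request can carry several feature types.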
+ +"""Annotations management for Vision API responses.""" + + +from google.cloud.vision.entity import EntityAnnotation +from google.cloud.vision.face import Face +from google.cloud.vision.feature import FeatureTypes +from google.cloud.vision.color import ImagePropertiesAnnotation +from google.cloud.vision.safe import SafeSearchAnnotation + + +_REVERSE_TYPES = { + FeatureTypes.FACE_DETECTION: 'faceAnnotations', + FeatureTypes.IMAGE_PROPERTIES: 'imagePropertiesAnnotation', + FeatureTypes.LABEL_DETECTION: 'labelAnnotations', + FeatureTypes.LANDMARK_DETECTION: 'landmarkAnnotations', + FeatureTypes.LOGO_DETECTION: 'logoAnnotations', + FeatureTypes.SAFE_SEARCH_DETECTION: 'safeSearchAnnotation', + FeatureTypes.TEXT_DETECTION: 'textAnnotations', +} + + +class Annotations(object): + """Annotation class for managing responses. + + :type faces: list + :param faces: List of :class:`~google.cloud.vision.face.Face`. + + :type properties: list + :param properties: + List of :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`. + + :type labels: list + :param labels: List of + :class:`~google.cloud.vision.entity.EntityAnnotation`. + + :type landmarks: list + :param landmarks: List of + :class:`~google.cloud.vision.entity.EntityAnnotation.` + + :type logos: list + :param logos: List of + :class:`~google.cloud.vision.entity.EntityAnnotation`. + + :type safe_searches: list + :param safe_searches: + List of :class:`~google.cloud.vision.safe.SafeSearchAnnotation` + + :type texts: list + :param texts: List of + :class:`~google.cloud.vision.entity.EntityAnnotation`. + """ + def __init__(self, faces=None, properties=None, labels=None, + landmarks=None, logos=None, safe_searches=None, texts=None): + self.faces = faces or [] + self.properties = properties or [] + self.labels = labels or [] + self.landmarks = landmarks or [] + self.logos = logos or [] + self.safe_searches = safe_searches or [] + self.texts = texts or [] + + @classmethod + def from_api_repr(cls, response): + """Factory: construct an instance of ``Annotations`` from a response. + + :type response: dict + :param response: Vision API response object. + + :rtype: :class:`~google.cloud.vision.annotations.Annotations` + :returns: An instance of ``Annotations`` with detection types loaded. 
+ """ + annotations = {} + + for feature_type in response.keys(): + annotations[feature_type] = [] + + for feature_type, annotation in response.items(): + annotations[feature_type].extend( + _entity_from_response_type(feature_type, annotation)) + + faces = annotations.get( + _REVERSE_TYPES[FeatureTypes.FACE_DETECTION], []) + properties = annotations.get( + _REVERSE_TYPES[FeatureTypes.IMAGE_PROPERTIES], []) + labels = annotations.get( + _REVERSE_TYPES[FeatureTypes.LABEL_DETECTION], []) + landmarks = annotations.get( + _REVERSE_TYPES[FeatureTypes.LANDMARK_DETECTION], []) + logos = annotations.get( + _REVERSE_TYPES[FeatureTypes.LOGO_DETECTION], []) + safe_searches = annotations.get( + _REVERSE_TYPES[FeatureTypes.SAFE_SEARCH_DETECTION], []) + texts = annotations.get( + _REVERSE_TYPES[FeatureTypes.TEXT_DETECTION], []) + + return cls(faces=faces, properties=properties, labels=labels, + landmarks=landmarks, logos=logos, + safe_searches=safe_searches, texts=texts) + + +def _entity_from_response_type(feature_type, results): + """Convert a JSON result to an entity type based on the feature.""" + + detected_objects = [] + if feature_type == _REVERSE_TYPES[FeatureTypes.FACE_DETECTION]: + detected_objects.extend( + Face.from_api_repr(face) for face in results) + elif feature_type == _REVERSE_TYPES[FeatureTypes.IMAGE_PROPERTIES]: + detected_objects.append( + ImagePropertiesAnnotation.from_api_repr(results)) + elif feature_type == _REVERSE_TYPES[FeatureTypes.SAFE_SEARCH_DETECTION]: + detected_objects.append(SafeSearchAnnotation.from_api_repr(results)) + else: + for result in results: + detected_objects.append(EntityAnnotation.from_api_repr(result)) + return detected_objects diff --git a/vision/google/cloud/vision/image.py b/vision/google/cloud/vision/image.py index 48d49bf65983..9f7a2afcdaee 100644 --- a/vision/google/cloud/vision/image.py +++ b/vision/google/cloud/vision/image.py @@ -19,31 +19,9 @@ from google.cloud._helpers import _to_bytes from google.cloud._helpers import _bytes_to_unicode -from google.cloud.vision.entity import EntityAnnotation -from google.cloud.vision.face import Face +from google.cloud.vision.annotations import Annotations from google.cloud.vision.feature import Feature from google.cloud.vision.feature import FeatureTypes -from google.cloud.vision.color import ImagePropertiesAnnotation -from google.cloud.vision.safe import SafeSearchAnnotation - - -_FACE_DETECTION = 'FACE_DETECTION' -_IMAGE_PROPERTIES = 'IMAGE_PROPERTIES' -_LABEL_DETECTION = 'LABEL_DETECTION' -_LANDMARK_DETECTION = 'LANDMARK_DETECTION' -_LOGO_DETECTION = 'LOGO_DETECTION' -_SAFE_SEARCH_DETECTION = 'SAFE_SEARCH_DETECTION' -_TEXT_DETECTION = 'TEXT_DETECTION' - -_REVERSE_TYPES = { - _FACE_DETECTION: 'faceAnnotations', - _IMAGE_PROPERTIES: 'imagePropertiesAnnotation', - _LABEL_DETECTION: 'labelAnnotations', - _LANDMARK_DETECTION: 'landmarkAnnotations', - _LOGO_DETECTION: 'logoAnnotations', - _SAFE_SEARCH_DETECTION: 'safeSearchAnnotation', - _TEXT_DETECTION: 'textAnnotations', -} class Image(object): @@ -105,7 +83,7 @@ def source(self): return self._source def _detect_annotation(self, features): - """Generic method for detecting a single annotation. + """Generic method for detecting annotations. 
:type features: list :param features: List of :class:`~google.cloud.vision.feature.Feature` @@ -118,12 +96,21 @@ def _detect_annotation(self, features): :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`, :class:`~google.cloud.vision.sage.SafeSearchAnnotation`, """ - detected_objects = [] results = self.client.annotate(self, features) - for feature in features: - detected_objects.extend( - _entity_from_response_type(feature.feature_type, results)) - return detected_objects + return Annotations.from_api_repr(results) + + def detect(self, features): + """Detect multiple feature types. + + :type features: list of :class:`~google.cloud.vision.feature.Feature` + :param features: List of the ``Feature`` indication the type of + annotation to perform. + + :rtype: list + :returns: List of + :class:`~google.cloud.vision.entity.EntityAnnotation`. + """ + return self._detect_annotation(features) def detect_faces(self, limit=10): """Detect faces in image. @@ -135,7 +122,8 @@ def detect_faces(self, limit=10): :returns: List of :class:`~google.cloud.vision.face.Face`. """ features = [Feature(FeatureTypes.FACE_DETECTION, limit)] - return self._detect_annotation(features) + annotations = self._detect_annotation(features) + return annotations.faces def detect_labels(self, limit=10): """Detect labels that describe objects in an image. @@ -147,7 +135,8 @@ def detect_labels(self, limit=10): :returns: List of :class:`~google.cloud.vision.entity.EntityAnnotation` """ features = [Feature(FeatureTypes.LABEL_DETECTION, limit)] - return self._detect_annotation(features) + annotations = self._detect_annotation(features) + return annotations.labels def detect_landmarks(self, limit=10): """Detect landmarks in an image. @@ -160,7 +149,8 @@ def detect_landmarks(self, limit=10): :class:`~google.cloud.vision.entity.EntityAnnotation`. """ features = [Feature(FeatureTypes.LANDMARK_DETECTION, limit)] - return self._detect_annotation(features) + annotations = self._detect_annotation(features) + return annotations.landmarks def detect_logos(self, limit=10): """Detect logos in an image. @@ -173,7 +163,8 @@ def detect_logos(self, limit=10): :class:`~google.cloud.vision.entity.EntityAnnotation`. """ features = [Feature(FeatureTypes.LOGO_DETECTION, limit)] - return self._detect_annotation(features) + annotations = self._detect_annotation(features) + return annotations.logos def detect_properties(self, limit=10): """Detect the color properties of an image. @@ -186,7 +177,8 @@ def detect_properties(self, limit=10): :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`. """ features = [Feature(FeatureTypes.IMAGE_PROPERTIES, limit)] - return self._detect_annotation(features) + annotations = self._detect_annotation(features) + return annotations.properties def detect_safe_search(self, limit=10): """Retreive safe search properties from an image. @@ -199,7 +191,8 @@ def detect_safe_search(self, limit=10): :class:`~google.cloud.vision.sage.SafeSearchAnnotation`. """ features = [Feature(FeatureTypes.SAFE_SEARCH_DETECTION, limit)] - return self._detect_annotation(features) + annotations = self._detect_annotation(features) + return annotations.safe_searches def detect_text(self, limit=10): """Detect text in an image. @@ -212,27 +205,5 @@ def detect_text(self, limit=10): :class:`~google.cloud.vision.entity.EntityAnnotation`. 
""" features = [Feature(FeatureTypes.TEXT_DETECTION, limit)] - return self._detect_annotation(features) - - -def _entity_from_response_type(feature_type, results): - """Convert a JSON result to an entity type based on the feature.""" - feature_key = _REVERSE_TYPES[feature_type] - annotations = results.get(feature_key, ()) - if not annotations: - return [] - - detected_objects = [] - if feature_type == _FACE_DETECTION: - detected_objects.extend( - Face.from_api_repr(face) for face in annotations) - elif feature_type == _IMAGE_PROPERTIES: - detected_objects.append( - ImagePropertiesAnnotation.from_api_repr(annotations)) - elif feature_type == _SAFE_SEARCH_DETECTION: - detected_objects.append( - SafeSearchAnnotation.from_api_repr(annotations)) - else: - for result in annotations: - detected_objects.append(EntityAnnotation.from_api_repr(result)) - return detected_objects + annotations = self._detect_annotation(features) + return annotations.texts diff --git a/vision/unit_tests/test_client.py b/vision/unit_tests/test_client.py index be3bda73f020..1f9a77e8bc79 100644 --- a/vision/unit_tests/test_client.py +++ b/vision/unit_tests/test_client.py @@ -81,6 +81,51 @@ def test_image_with_client(self): image = client.image(source_uri=IMAGE_SOURCE) self.assertIsInstance(image, Image) + def test_multiple_detection_from_content(self): + from google.cloud.vision.feature import Feature + from google.cloud.vision.feature import FeatureTypes + from unit_tests._fixtures import LABEL_DETECTION_RESPONSE + from unit_tests._fixtures import LOGO_DETECTION_RESPONSE + RETURNED = LABEL_DETECTION_RESPONSE + LOGOS = LOGO_DETECTION_RESPONSE['responses'][0]['logoAnnotations'] + RETURNED['responses'][0]['logoAnnotations'] = LOGOS + + credentials = _Credentials() + client = self._make_one(project=PROJECT, credentials=credentials) + client._connection = _Connection(RETURNED) + + limit = 2 + label_feature = Feature(FeatureTypes.LABEL_DETECTION, limit) + logo_feature = Feature(FeatureTypes.LOGO_DETECTION, limit) + features = [label_feature, logo_feature] + image = client.image(content=IMAGE_CONTENT) + items = image.detect(features) + + self.assertEqual(len(items.logos), 2) + self.assertEqual(len(items.labels), 3) + self.assertEqual(items.logos[0].description, 'Brand1') + self.assertEqual(items.logos[0].score, 0.63192177) + self.assertEqual(items.logos[1].description, 'Brand2') + self.assertEqual(items.logos[1].score, 0.5492993) + + self.assertEqual(items.labels[0].description, 'automobile') + self.assertEqual(items.labels[0].score, 0.9776855) + self.assertEqual(items.labels[1].description, 'vehicle') + self.assertEqual(items.labels[1].score, 0.947987) + self.assertEqual(items.labels[2].description, 'truck') + self.assertEqual(items.labels[2].score, 0.88429511) + + image_request = client._connection._requested[0]['data']['requests'][0] + label_request = image_request['features'][0] + logo_request = image_request['features'][1] + + self.assertEqual(B64_IMAGE_CONTENT, + image_request['image']['content']) + self.assertEqual(label_request['maxResults'], 2) + self.assertEqual(label_request['type'], 'LABEL_DETECTION') + self.assertEqual(logo_request['maxResults'], 2) + self.assertEqual(logo_request['type'], 'LOGO_DETECTION') + def test_face_detection_from_source(self): from google.cloud.vision.face import Face from unit_tests._fixtures import FACE_DETECTION_RESPONSE From a720bff257b10dbdb2fd05dfc3bac48d5f02410f Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Tue, 29 Nov 2016 15:59:14 -0500 Subject: [PATCH 2/3] Streamline 
annotations and filtering responses. --- docs/vision-annotations.rst | 3 - vision/google/cloud/vision/annotations.py | 82 +++++++++-------------- vision/unit_tests/test_client.py | 53 +++++++++------ 3 files changed, 64 insertions(+), 74 deletions(-) diff --git a/docs/vision-annotations.rst b/docs/vision-annotations.rst index cae8edcba349..57ac2acbe806 100644 --- a/docs/vision-annotations.rst +++ b/docs/vision-annotations.rst @@ -1,9 +1,6 @@ Vision Annotations ================== -Image Annotations -~~~~~~~~~~~~~~~~~ - .. automodule:: google.cloud.vision.annotations :members: :undoc-members: diff --git a/vision/google/cloud/vision/annotations.py b/vision/google/cloud/vision/annotations.py index 421a9f4a7f7f..64e0eeff6c70 100644 --- a/vision/google/cloud/vision/annotations.py +++ b/vision/google/cloud/vision/annotations.py @@ -15,26 +15,14 @@ """Annotations management for Vision API responses.""" +from google.cloud.vision.color import ImagePropertiesAnnotation from google.cloud.vision.entity import EntityAnnotation from google.cloud.vision.face import Face -from google.cloud.vision.feature import FeatureTypes -from google.cloud.vision.color import ImagePropertiesAnnotation from google.cloud.vision.safe import SafeSearchAnnotation -_REVERSE_TYPES = { - FeatureTypes.FACE_DETECTION: 'faceAnnotations', - FeatureTypes.IMAGE_PROPERTIES: 'imagePropertiesAnnotation', - FeatureTypes.LABEL_DETECTION: 'labelAnnotations', - FeatureTypes.LANDMARK_DETECTION: 'landmarkAnnotations', - FeatureTypes.LOGO_DETECTION: 'logoAnnotations', - FeatureTypes.SAFE_SEARCH_DETECTION: 'safeSearchAnnotation', - FeatureTypes.TEXT_DETECTION: 'textAnnotations', -} - - class Annotations(object): - """Annotation class for managing responses. + """Helper class to bundle annotation responses. :type faces: list :param faces: List of :class:`~google.cloud.vision.face.Face`. @@ -65,13 +53,13 @@ class Annotations(object): """ def __init__(self, faces=None, properties=None, labels=None, landmarks=None, logos=None, safe_searches=None, texts=None): - self.faces = faces or [] - self.properties = properties or [] - self.labels = labels or [] - self.landmarks = landmarks or [] - self.logos = logos or [] - self.safe_searches = safe_searches or [] - self.texts = texts or [] + self.faces = faces or () + self.properties = properties or () + self.labels = labels or () + self.landmarks = landmarks or () + self.logos = logos or () + self.safe_searches = safe_searches or () + self.texts = texts or () @classmethod def from_api_repr(cls, response): @@ -84,45 +72,41 @@ def from_api_repr(cls, response): :returns: An instance of ``Annotations`` with detection types loaded. 
""" annotations = {} - - for feature_type in response.keys(): - annotations[feature_type] = [] + key_map = { + 'faceAnnotations': 'faces', + 'imagePropertiesAnnotation': 'properties', + 'labelAnnotations': 'labels', + 'landmarkAnnotations': 'landmarks', + 'logoAnnotations': 'logos', + 'safeSearchAnnotation': 'safe_searches', + 'textAnnotations': 'texts' + } for feature_type, annotation in response.items(): - annotations[feature_type].extend( + curr_feature = annotations.setdefault(key_map[feature_type], []) + curr_feature.extend( _entity_from_response_type(feature_type, annotation)) - - faces = annotations.get( - _REVERSE_TYPES[FeatureTypes.FACE_DETECTION], []) - properties = annotations.get( - _REVERSE_TYPES[FeatureTypes.IMAGE_PROPERTIES], []) - labels = annotations.get( - _REVERSE_TYPES[FeatureTypes.LABEL_DETECTION], []) - landmarks = annotations.get( - _REVERSE_TYPES[FeatureTypes.LANDMARK_DETECTION], []) - logos = annotations.get( - _REVERSE_TYPES[FeatureTypes.LOGO_DETECTION], []) - safe_searches = annotations.get( - _REVERSE_TYPES[FeatureTypes.SAFE_SEARCH_DETECTION], []) - texts = annotations.get( - _REVERSE_TYPES[FeatureTypes.TEXT_DETECTION], []) - - return cls(faces=faces, properties=properties, labels=labels, - landmarks=landmarks, logos=logos, - safe_searches=safe_searches, texts=texts) + return cls(**annotations) def _entity_from_response_type(feature_type, results): - """Convert a JSON result to an entity type based on the feature.""" - + """Convert a JSON result to an entity type based on the feature. + + :rtype: list + :returns: List containing any of + :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`, + :class:`~google.cloud.vision.entity.EntityAnnotation`, + :class:`~google.cloud.vision.face.Face`, + :class:`~google.cloud.vision.safe.SafeSearchAnnotation`. 
+ """ detected_objects = [] - if feature_type == _REVERSE_TYPES[FeatureTypes.FACE_DETECTION]: + if feature_type == 'faceAnnotations': detected_objects.extend( Face.from_api_repr(face) for face in results) - elif feature_type == _REVERSE_TYPES[FeatureTypes.IMAGE_PROPERTIES]: + elif feature_type == 'imagePropertiesAnnotation': detected_objects.append( ImagePropertiesAnnotation.from_api_repr(results)) - elif feature_type == _REVERSE_TYPES[FeatureTypes.SAFE_SEARCH_DETECTION]: + elif feature_type == 'safeSearchAnnotation': detected_objects.append(SafeSearchAnnotation.from_api_repr(results)) else: for result in results: diff --git a/vision/unit_tests/test_client.py b/vision/unit_tests/test_client.py index 1f9a77e8bc79..a7ff1cccd81f 100644 --- a/vision/unit_tests/test_client.py +++ b/vision/unit_tests/test_client.py @@ -82,17 +82,19 @@ def test_image_with_client(self): self.assertIsInstance(image, Image) def test_multiple_detection_from_content(self): + import copy from google.cloud.vision.feature import Feature from google.cloud.vision.feature import FeatureTypes from unit_tests._fixtures import LABEL_DETECTION_RESPONSE from unit_tests._fixtures import LOGO_DETECTION_RESPONSE - RETURNED = LABEL_DETECTION_RESPONSE - LOGOS = LOGO_DETECTION_RESPONSE['responses'][0]['logoAnnotations'] - RETURNED['responses'][0]['logoAnnotations'] = LOGOS + + returned = copy.deepcopy(LABEL_DETECTION_RESPONSE) + logos = copy.deepcopy(LOGO_DETECTION_RESPONSE['responses'][0]) + returned['responses'][0]['logoAnnotations'] = logos['logoAnnotations'] credentials = _Credentials() client = self._make_one(project=PROJECT, credentials=credentials) - client._connection = _Connection(RETURNED) + client._connection = _Connection(returned) limit = 2 label_feature = Feature(FeatureTypes.LABEL_DETECTION, limit) @@ -103,19 +105,26 @@ def test_multiple_detection_from_content(self): self.assertEqual(len(items.logos), 2) self.assertEqual(len(items.labels), 3) - self.assertEqual(items.logos[0].description, 'Brand1') - self.assertEqual(items.logos[0].score, 0.63192177) - self.assertEqual(items.logos[1].description, 'Brand2') - self.assertEqual(items.logos[1].score, 0.5492993) - - self.assertEqual(items.labels[0].description, 'automobile') - self.assertEqual(items.labels[0].score, 0.9776855) - self.assertEqual(items.labels[1].description, 'vehicle') - self.assertEqual(items.labels[1].score, 0.947987) - self.assertEqual(items.labels[2].description, 'truck') - self.assertEqual(items.labels[2].score, 0.88429511) - - image_request = client._connection._requested[0]['data']['requests'][0] + first_logo = items.logos[0] + second_logo = items.logos[1] + self.assertEqual(first_logo.description, 'Brand1') + self.assertEqual(first_logo.score, 0.63192177) + self.assertEqual(second_logo.description, 'Brand2') + self.assertEqual(second_logo.score, 0.5492993) + + first_label = items.labels[0] + second_label = items.labels[1] + third_label = items.labels[2] + self.assertEqual(first_label.description, 'automobile') + self.assertEqual(first_label.score, 0.9776855) + self.assertEqual(second_label.description, 'vehicle') + self.assertEqual(second_label.score, 0.947987) + self.assertEqual(third_label.description, 'truck') + self.assertEqual(third_label.score, 0.88429511) + + requested = client._connection._requested + requests = requested[0]['data']['requests'] + image_request = requests[0] label_request = image_request['features'][0] logo_request = image_request['features'][1] @@ -171,7 +180,7 @@ def test_face_detection_from_content_no_results(self): image = 
client.image(content=IMAGE_CONTENT) faces = image.detect_faces(limit=5) - self.assertEqual(faces, []) + self.assertEqual(faces, ()) self.assertEqual(len(faces), 0) image_request = client._connection._requested[0]['data']['requests'][0] @@ -211,7 +220,7 @@ def test_label_detection_no_results(self): image = client.image(content=IMAGE_CONTENT) labels = image.detect_labels() - self.assertEqual(labels, []) + self.assertEqual(labels, ()) self.assertEqual(len(labels), 0) def test_landmark_detection_from_source(self): @@ -264,7 +273,7 @@ def test_landmark_detection_no_results(self): image = client.image(content=IMAGE_CONTENT) landmarks = image.detect_landmarks() - self.assertEqual(landmarks, []) + self.assertEqual(landmarks, ()) self.assertEqual(len(landmarks), 0) def test_logo_detection_from_source(self): @@ -353,7 +362,7 @@ def test_safe_search_no_results(self): image = client.image(content=IMAGE_CONTENT) safe_search = image.detect_safe_search() - self.assertEqual(safe_search, []) + self.assertEqual(safe_search, ()) self.assertEqual(len(safe_search), 0) def test_image_properties_detection_from_source(self): @@ -389,7 +398,7 @@ def test_image_properties_no_results(self): image = client.image(content=IMAGE_CONTENT) image_properties = image.detect_properties() - self.assertEqual(image_properties, []) + self.assertEqual(image_properties, ()) self.assertEqual(len(image_properties), 0) From 52aaf056de25e909503ba0be3c5a0a2c1de5304a Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Tue, 29 Nov 2016 21:30:03 -0500 Subject: [PATCH 3/3] Make __init__ defaults immutable tuples, move key_map to modual constant, make duplicated response keys constants. --- vision/google/cloud/vision/annotations.py | 51 +++++++++++++---------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/vision/google/cloud/vision/annotations.py b/vision/google/cloud/vision/annotations.py index 64e0eeff6c70..43b828194b5b 100644 --- a/vision/google/cloud/vision/annotations.py +++ b/vision/google/cloud/vision/annotations.py @@ -21,6 +21,21 @@ from google.cloud.vision.safe import SafeSearchAnnotation +FACE_ANNOTATIONS = 'faceAnnotations' +IMAGE_PROPERTIES_ANNOTATION = 'imagePropertiesAnnotation' +SAFE_SEARCH_ANNOTATION = 'safeSearchAnnotation' + +_KEY_MAP = { + FACE_ANNOTATIONS: 'faces', + IMAGE_PROPERTIES_ANNOTATION: 'properties', + 'labelAnnotations': 'labels', + 'landmarkAnnotations': 'landmarks', + 'logoAnnotations': 'logos', + SAFE_SEARCH_ANNOTATION: 'safe_searches', + 'textAnnotations': 'texts' +} + + class Annotations(object): """Helper class to bundle annotation responses. @@ -51,15 +66,15 @@ class Annotations(object): :param texts: List of :class:`~google.cloud.vision.entity.EntityAnnotation`. """ - def __init__(self, faces=None, properties=None, labels=None, - landmarks=None, logos=None, safe_searches=None, texts=None): - self.faces = faces or () - self.properties = properties or () - self.labels = labels or () - self.landmarks = landmarks or () - self.logos = logos or () - self.safe_searches = safe_searches or () - self.texts = texts or () + def __init__(self, faces=(), properties=(), labels=(), landmarks=(), + logos=(), safe_searches=(), texts=()): + self.faces = faces + self.properties = properties + self.labels = labels + self.landmarks = landmarks + self.logos = logos + self.safe_searches = safe_searches + self.texts = texts @classmethod def from_api_repr(cls, response): @@ -72,18 +87,8 @@ def from_api_repr(cls, response): :returns: An instance of ``Annotations`` with detection types loaded. 
""" annotations = {} - key_map = { - 'faceAnnotations': 'faces', - 'imagePropertiesAnnotation': 'properties', - 'labelAnnotations': 'labels', - 'landmarkAnnotations': 'landmarks', - 'logoAnnotations': 'logos', - 'safeSearchAnnotation': 'safe_searches', - 'textAnnotations': 'texts' - } - for feature_type, annotation in response.items(): - curr_feature = annotations.setdefault(key_map[feature_type], []) + curr_feature = annotations.setdefault(_KEY_MAP[feature_type], []) curr_feature.extend( _entity_from_response_type(feature_type, annotation)) return cls(**annotations) @@ -100,13 +105,13 @@ def _entity_from_response_type(feature_type, results): :class:`~google.cloud.vision.safe.SafeSearchAnnotation`. """ detected_objects = [] - if feature_type == 'faceAnnotations': + if feature_type == FACE_ANNOTATIONS: detected_objects.extend( Face.from_api_repr(face) for face in results) - elif feature_type == 'imagePropertiesAnnotation': + elif feature_type == IMAGE_PROPERTIES_ANNOTATION: detected_objects.append( ImagePropertiesAnnotation.from_api_repr(results)) - elif feature_type == 'safeSearchAnnotation': + elif feature_type == SAFE_SEARCH_ANNOTATION: detected_objects.append(SafeSearchAnnotation.from_api_repr(results)) else: for result in results: