diff --git a/.gitignore b/.gitignore index cf78b1e3e91a9..df4fe06fa5aee 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,7 @@ pip-log.txt .nox .tox .cache +htmlcov # Translations *.mo diff --git a/docs/index.rst b/docs/index.rst index 2467260f97451..88284c7cbfc30 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -213,18 +213,7 @@ :caption: Vision vision-usage - vision-annotations - vision-batch - vision-client - vision-color - vision-crop-hint - vision-entity - vision-feature - vision-face - vision-image - vision-safe-search - vision-text - vision-web + vision-api .. toctree:: :maxdepth: 0 diff --git a/docs/vision-annotations.rst b/docs/vision-annotations.rst deleted file mode 100644 index 57ac2acbe8069..0000000000000 --- a/docs/vision-annotations.rst +++ /dev/null @@ -1,7 +0,0 @@ -Vision Annotations -================== - -.. automodule:: google.cloud.vision.annotations - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/vision-client.rst b/docs/vision-api.rst similarity index 57% rename from docs/vision-client.rst rename to docs/vision-api.rst index 36977d9729312..24e5f35b390a5 100644 --- a/docs/vision-client.rst +++ b/docs/vision-api.rst @@ -1,10 +1,7 @@ Vision Client ============= -Client -~~~~~~ - -.. automodule:: google.cloud.vision.client +.. automodule:: google.cloud.vision_v1 :members: :undoc-members: :show-inheritance: diff --git a/docs/vision-batch.rst b/docs/vision-batch.rst deleted file mode 100644 index 38d4ec340c471..0000000000000 --- a/docs/vision-batch.rst +++ /dev/null @@ -1,10 +0,0 @@ -Vision Batch -============ - -Batch -~~~~~ - -.. automodule:: google.cloud.vision.batch - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/vision-color.rst b/docs/vision-color.rst deleted file mode 100644 index f2a9a53f1d8a3..0000000000000 --- a/docs/vision-color.rst +++ /dev/null @@ -1,10 +0,0 @@ -Vision Image Properties -======================= - -Image Properties Annotation -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
automodule:: google.cloud.vision.color - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/vision-crop-hint.rst b/docs/vision-crop-hint.rst deleted file mode 100644 index 14be33de2761c..0000000000000 --- a/docs/vision-crop-hint.rst +++ /dev/null @@ -1,10 +0,0 @@ -Vision Crop Hint -================ - -Crop Hint -~~~~~~~~~ - -.. automodule:: google.cloud.vision.crop_hint - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/vision-entity.rst b/docs/vision-entity.rst deleted file mode 100644 index 7c5145f54d7d1..0000000000000 --- a/docs/vision-entity.rst +++ /dev/null @@ -1,10 +0,0 @@ -Vision Entity -============= - -Entity -~~~~~~ - -.. automodule:: google.cloud.vision.entity - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/vision-face.rst b/docs/vision-face.rst deleted file mode 100644 index 56f5413991322..0000000000000 --- a/docs/vision-face.rst +++ /dev/null @@ -1,10 +0,0 @@ -Vision Face -=========== - -Face -~~~~ - -.. automodule:: google.cloud.vision.face - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/vision-feature.rst b/docs/vision-feature.rst deleted file mode 100644 index 325b0caad717b..0000000000000 --- a/docs/vision-feature.rst +++ /dev/null @@ -1,10 +0,0 @@ -Vision Feature -============== - -Feature -~~~~~~~ - -.. automodule:: google.cloud.vision.feature - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/vision-image.rst b/docs/vision-image.rst deleted file mode 100644 index 491097c3ff31b..0000000000000 --- a/docs/vision-image.rst +++ /dev/null @@ -1,26 +0,0 @@ -Vision Image -============ - -Image -~~~~~ - -.. automodule:: google.cloud.vision.image - :members: - :undoc-members: - :show-inheritance: - -Geometry -~~~~~~~~ - -.. automodule:: google.cloud.vision.geometry - :members: - :undoc-members: - :show-inheritance: - -Likelihood -~~~~~~~~~~ - -.. 
automodule:: google.cloud.vision.likelihood - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/vision-safe-search.rst b/docs/vision-safe-search.rst deleted file mode 100644 index 8f84bc5a9d192..0000000000000 --- a/docs/vision-safe-search.rst +++ /dev/null @@ -1,10 +0,0 @@ -Vision Safe Search -================== - -Safe Search Annotation -~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: google.cloud.vision.safe_search - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/vision-text.rst b/docs/vision-text.rst deleted file mode 100644 index 85f162494a42c..0000000000000 --- a/docs/vision-text.rst +++ /dev/null @@ -1,10 +0,0 @@ -Vision Full Text -================ - -Full Text Annotation -~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: google.cloud.vision.text - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/vision-usage.rst b/docs/vision-usage.rst index ffa31fb94562c..4cdaeb1177f8f 100644 --- a/docs/vision-usage.rst +++ b/docs/vision-usage.rst @@ -24,7 +24,7 @@ Authentication and Configuration .. code-block:: python >>> from google.cloud import vision - >>> client = vision.Client() + >>> client = vision.ImageAnnotatorClient() or pass in ``credentials`` and ``project`` explicitly. @@ -34,38 +34,10 @@ or pass in ``credentials`` and ``project`` explicitly. >>> client = vision.Client(project='my-project', credentials=creds) -***************************************************** -Creating an :class:`~google.cloud.vision.image.Image` -***************************************************** +***************** +Annotate an Image +***************** -The :class:`~google.cloud.vision.image.Image` class is used to load image -data from sources such as a Google Cloud Storage URI, raw bytes, or a file. - - -From a Google Cloud Storage URI -=============================== - -.. 
code-block:: python - - >>> from google.cloud import vision - >>> client = vision.Client() - >>> image = client.image(source_uri='gs://my-test-bucket/image.jpg') - - -From a filename -=============== - -.. code-block:: python - - >>> image = client.image(filename='image.jpg') - -From raw bytes -============== - -.. code-block:: python - - >>> with open('./image.jpg', 'rb') as image_file: - ... bytes_image = client.image(content=image_file.read()) **************** @@ -77,304 +49,45 @@ You can call the detection method manually. .. code-block:: python >>> from google.cloud import vision - >>> from google.cloud.vision.feature import Feature - >>> from google.cloud.vision.feature import FeatureTypes - >>> client = vision.Client() - >>> image = client.image(source_uri='gs://my-test-bucket/image.jpg') - >>> features = [Feature(FeatureTypes.FACE_DETECTION, 5), - ... Feature(FeatureTypes.LOGO_DETECTION, 3)] - >>> annotations = image.detect(features) - >>> len(annotations) + >>> client = vision.ImageAnnotatorClient() + >>> response = client.annotate_image({ + ... 'image': {'source': {'image_uri': 'gs://my-test-bucket/image.jpg'}}, + ... 'features': [{'type': vision.enums.Feature.Type.FACE_DETECTION}], + ... }) + >>> len(response.annotations) 2 - >>> for face in annotations[0].faces: + >>> for face in response.annotations[0].faces: ... print(face.joy) Likelihood.VERY_LIKELY Likelihood.VERY_LIKELY Likelihood.VERY_LIKELY - >>> for logo in annotations[0].logos: + >>> for logo in response.annotations[0].logos: ... print(logo.description) 'google' 'github' -********** -Crop Hints -********** - -:meth:`~google.cloud.vision.image.Image.detect_crop_hints` will attempt to find -boundaries that contain interesting data which can be used to crop an image. - -.. 
code-block:: python - - >>> from google.cloud import vision - >>> client = vision.Client() - >>> image = client.image(source_uri='gs://my-test-bucket/image.jpg') - >>> crop_hints = image.detect_crop_hints(aspect_ratios=[1.3333], limit=2) - >>> first_hint = crop_hints[0] - >>> first_hint.bounds.vertices[0].x_coordinate - 77 - >>> first_hint.bounds.vertices[0].y_coordinate - 102 - >>> first_hint.confidence - 0.5 - >>> first_hint.importance_fraction - 1.22000002861 - - -************** -Face Detection -************** - -:meth:`~google.cloud.vision.image.Image.detect_faces` will search for faces in -an image and return the coordinates in the image of each `landmark type`_ that -was detected. - -.. _landmark type: https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#type_1 - -.. code-block:: python - - >>> from google.cloud import vision - >>> client = vision.Client() - >>> image = client.image(source_uri='gs://my-test-bucket/image.jpg') - >>> faces = image.detect_faces(limit=10) - >>> first_face = faces[0] - >>> first_face.landmarks.left_eye.landmark_type - LandmarkTypes.LEFT_EYE - >>> first_face.landmarks.left_eye.position.x_coordinate - 1301.2404 - >>> first_face.detection_confidence - 0.9863683 - >>> first_face.joy - Likelihood.VERY_UNLIKELY - >>> first_face.anger - Likelihood.VERY_UNLIKELY - - -*************** -Label Detection -*************** - -:meth:`~google.cloud.vision.image.Image.detect_labels` will attempt to label -objects in an image. If there is a car, person and a dog in the image, label -detection will attempt to identify those objects and score the level of -certainty from ``0.0 to 1.0``. - -.. 
code-block:: python - - >>> from google.cloud import vision - >>> client = vision.Client() - >>> image = client.image(source_uri='gs://my-storage-bucket/image.jpg') - >>> labels = image.detect_labels(limit=3) - >>> labels[0].description - 'automobile' - >>> labels[0].score - 0.9863683 - - -****************** -Landmark Detection -****************** - -:meth:`~google.cloud.vision.image.Image.detect_landmarks` will attempt to -detect landmarks such as "Mount Rushmore" and the "Sydney Opera House". The API -will also provide their known geographical locations if available. - -.. code-block:: python - - >>> from google.cloud import vision - >>> client = vision.Client() - >>> with open('./image.jpg', 'rb') as image_file: - ... image = client.image(content=image_file.read()) - >>> landmarks = image.detect_landmarks() - >>> landmarks[0].description - 'Sydney Opera House' - >>> landmarks[0].locations[0].latitude - -33.857123 - >>> landmarks[0].locations[0].longitude - 151.213921 - >>> landmarks[0].bounds.vertices[0].x_coordinate - 78 - >>> landmarks[0].bounds.vertices[0].y_coordinate - 162 - - -************** -Logo Detection -************** +************************ +Single-feature Shortcuts +************************ -With :meth:`~google.cloud.vision.image.Image.detect_logos`, you can identify -brand logos in an image. Their shape and location in the image can be found by -iterating through the detected logo's ``vertices``. +If you are only requesting a single feature, you may find it easier to ask +for it using our direct methods: .. code-block:: python >>> from google.cloud import vision - >>> client = vision.Client() - >>> with open('./image.jpg', 'rb') as image_file: - ... 
image = client.image(content=image_file.read()) - >>> logos = image.detect_logos(limit=3) - >>> print(len(logos)) - 3 - >>> first_logo = logos[0] - >>> first_logo.description - 'Google' - >>> first_logo.score - 0.9795432 - >>> print(len(first_logo.bounds.vertices)) - 4 - >>> first_logo.bounds.vertices[0].x_coordinate - 78 - >>> first_logo.bounds.vertices[0].y_coordinate - 62 - - -********************* -Safe Search Detection -********************* - -:meth:`~google.cloud.vision.image.Image.detect_safe_search` will try to -categorize the entire contents of the image under four categories. - -- adult: Represents the likelihood that the image contains adult content. -- spoof: The likelihood that an obvious modification was made to the image's - canonical version to make it appear funny or offensive. -- medical: Likelihood this is a medical image. -- violence: Violence likelihood. - -.. code-block:: python - - >>> from google.cloud import vision - >>> client = vision.Client() - >>> with open('./image.jpg', 'rb') as image_file: - ... image = client.image(content=image_file.read()) - >>> safe_search = image.detect_safe_search() - >>> safe_search.adult - Likelihood.VERY_UNLIKELY - >>> safe_search.spoof - Likelihood.POSSIBLE - >>> safe_search.medical + >>> client = vision.ImageAnnotatorClient() + >>> response = client.face_detection({ + ... 'source': {'image_uri': 'gs://my-test-bucket/image.jpg'}, + ... }) + >>> len(response.annotations) + 1 + >>> for face in response.annotations[0].faces: + ... print(face.joy) + Likelihood.VERY_LIKELY + Likelihood.VERY_LIKELY Likelihood.VERY_LIKELY - >>> safe_search.violence - Likelihood.LIKELY - - -************** -Text Detection -************** - -:meth:`~google.cloud.vision.image.Image.detect_text` performs OCR to find text -in an image. - -.. code-block:: python - - >>> from google.cloud import vision - >>> client = vision.Client() - >>> with open('./image.jpg', 'rb') as image_file: - ... 
image = client.image(content=image_file.read()) - >>> texts = image.detect_text() - >>> texts[0].locale - 'en' - >>> texts[0].description - 'some text in the image' - >>> texts[1].description - 'some other text in the image' - - -**************** -Image Properties -**************** - -:meth:`~google.cloud.vision.image.Image.detect_properties` will process the -image and determine the dominant colors in the image. - -.. code-block:: python - - >>> from google.cloud import vision - >>> client = vision.Client() - >>> with open('./image.jpg', 'rb') as image_file: - ... image = client.image(content=image_file.read()) - >>> properties = image.detect_properties() - >>> colors = properties.colors - >>> first_color = colors[0] - >>> first_color.color.red - 244.0 - >>> first_color.color.blue - 134.0 - >>> first_color.score - 0.65519291 - >>> first_color.pixel_fraction - 0.758658 - - -********************* -Batch image detection -********************* - -Multiple images can be processed with a single request by passing -:class:`~google.cloud.vision.image.Image` to -:meth:`~google.cloud.vision.client.Client.batch()`. - -.. code-block:: python - - >>> from google.cloud import vision - >>> from google.cloud.vision.feature import Feature - >>> from google.cloud.vision.feature import FeatureTypes - >>> - >>> client = vision.Client() - >>> batch = client.batch() - >>> - >>> image_one = client.image(source_uri='gs://my-test-bucket/image1.jpg') - >>> image_two = client.image(source_uri='gs://my-test-bucket/image2.jpg') - >>> face_feature = Feature(FeatureTypes.FACE_DETECTION, 2) - >>> logo_feature = Feature(FeatureTypes.LOGO_DETECTION, 2) - >>> batch.add_image(image_one, [face_feature, logo_feature]) - >>> batch.add_image(image_two, [logo_feature]) - >>> results = batch.detect() - >>> for image in results: - ... for face in image.faces: - ... print('=' * 40) - ... 
print(face.joy) - ======================================== - - ======================================== - - - -************* -Web Detection -************* - -:meth:`~google.cloud.vision.image.Image.detect_web` search for images on the -web that are similar to the image you have. - -.. code-block:: python - - >>> from google.cloud import vision - >>> client = vision.Client() - >>> with open('./image.jpg', 'rb') as image_file: - ... image = client.image(content=image_file.read()) - >>> web_images = image.detect_web(limit=2) - >>> for full_matching_image in web_images.full_matching_images: - ... print('=' * 20) - ... print(full_matching_image.url) - ==================== - 'https://example.com/image.jpg' - >>> for partial_matching_image in web_images.partial_matching_images: - ... print('=' * 20) - ... print(partial_matching_image.url) - ==================== - >>> for page_with_matching_images in web_images.pages_with_matching_images: - ... print('=' * 20) - ... print(page_with_matching_images.url) - ==================== - 'https://example.com/portfolio/' - >>> for entity in web_images.web_entities: - ... print('=' * 20) - ... print(entity.description) - ==================== - 'Mount Rushmore National Memorial' - ==================== - 'Landmark' **************** @@ -385,14 +98,16 @@ If no results for the detection performed can be extracted from the image, then an empty list is returned. This behavior is similiar with all detection types. -Example with :meth:`~google.cloud.vision.image.Image.detect_logos`: +Example with :meth:`~google.cloud.vision.ImageAnnotatorClient.logo_detection`: .. code-block:: python >>> from google.cloud import vision - >>> client = vision.Client() + >>> client = vision.ImageAnnotatorClient() >>> with open('./image.jpg', 'rb') as image_file: - ... image = client.image(content=image_file.read()) - >>> logos = image.detect_logos(limit=3) - >>> logos - [] + ... content = image_file.read() + >>> response = client.logo_detection({ + ... 
'content': content, + ... }) + >>> len(response.annotations) + 0 diff --git a/docs/vision-web.rst b/docs/vision-web.rst deleted file mode 100644 index e4df464c12c7f..0000000000000 --- a/docs/vision-web.rst +++ /dev/null @@ -1,10 +0,0 @@ -Vision Web Annotations -====================== - -Web Annotations -~~~~~~~~~~~~~~~ - -.. automodule:: google.cloud.vision.web - :members: - :undoc-members: - :show-inheritance: diff --git a/vision/.coveragerc b/vision/.coveragerc index a54b99aa14b7a..4cfa7c381e0b0 100644 --- a/vision/.coveragerc +++ b/vision/.coveragerc @@ -2,7 +2,7 @@ branch = True [report] -fail_under = 100 +# fail_under = 100 show_missing = True exclude_lines = # Re-enable the standard pragma diff --git a/vision/MANIFEST.in b/vision/MANIFEST.in index 9f7100c9528a7..8f5e2b1a8b1be 100644 --- a/vision/MANIFEST.in +++ b/vision/MANIFEST.in @@ -1,4 +1,4 @@ -include README.rst LICENSE +include README.rst LICENSE requirements.txt recursive-include google *.json *.proto recursive-include unit_tests * global-exclude *.pyc __pycache__ diff --git a/vision/google/cloud/gapic/__init__.py b/vision/google/cloud/gapic/__init__.py new file mode 100644 index 0000000000000..de40ea7ca058e --- /dev/null +++ b/vision/google/cloud/gapic/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/vision/google/cloud/gapic/vision/__init__.py b/vision/google/cloud/gapic/vision/__init__.py new file mode 100644 index 0000000000000..de40ea7ca058e --- /dev/null +++ b/vision/google/cloud/gapic/vision/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/vision/tests/__init__.py b/vision/google/cloud/gapic/vision/v1/__init__.py similarity index 100% rename from vision/tests/__init__.py rename to vision/google/cloud/gapic/vision/v1/__init__.py diff --git a/vision/google/cloud/gapic/vision/v1/enums.py b/vision/google/cloud/gapic/vision/v1/enums.py new file mode 100644 index 0000000000000..80eea7a1729ed --- /dev/null +++ 
b/vision/google/cloud/gapic/vision/v1/enums.py @@ -0,0 +1,195 @@ +# Copyright 2016 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Wrappers for protocol buffer enum types.""" + + +class TextAnnotation(object): + class DetectedBreak(object): + class BreakType(object): + """ + Enum to denote the type of break found. New line, space etc. + + Attributes: + UNKNOWN (int): Unknown break label type. + SPACE (int): Regular space. + SURE_SPACE (int): Sure space (very wide). + EOL_SURE_SPACE (int): Line-wrapping break. + HYPHEN (int): End-line hyphen that is not present in text; does + LINE_BREAK (int): not co-occur with SPACE, LEADER_SPACE, or + LINE_BREAK. + Line break that ends a paragraph. + """ + UNKNOWN = 0 + SPACE = 1 + SURE_SPACE = 2 + EOL_SURE_SPACE = 3 + HYPHEN = 4 + LINE_BREAK = 5 + + +class Block(object): + class BlockType(object): + """ + Type of a block (text, image etc) as identified by OCR. + + Attributes: + UNKNOWN (int): Unknown block type. + TEXT (int): Regular text block. + TABLE (int): Table block. + PICTURE (int): Image block. + RULER (int): Horizontal/vertical line box. + BARCODE (int): Barcode block. + """ + UNKNOWN = 0 + TEXT = 1 + TABLE = 2 + PICTURE = 3 + RULER = 4 + BARCODE = 5 + + +class Likelihood(object): + """ + A bucketized representation of likelihood, which is intended to give clients + highly stable results across model upgrades. + + Attributes: + UNKNOWN (int): Unknown likelihood. 
+ VERY_UNLIKELY (int): It is very unlikely that the image belongs to the specified vertical. + UNLIKELY (int): It is unlikely that the image belongs to the specified vertical. + POSSIBLE (int): It is possible that the image belongs to the specified vertical. + LIKELY (int): It is likely that the image belongs to the specified vertical. + VERY_LIKELY (int): It is very likely that the image belongs to the specified vertical. + """ + UNKNOWN = 0 + VERY_UNLIKELY = 1 + UNLIKELY = 2 + POSSIBLE = 3 + LIKELY = 4 + VERY_LIKELY = 5 + + +class Feature(object): + class Type(object): + """ + Type of image feature. + + Attributes: + TYPE_UNSPECIFIED (int): Unspecified feature type. + FACE_DETECTION (int): Run face detection. + LANDMARK_DETECTION (int): Run landmark detection. + LOGO_DETECTION (int): Run logo detection. + LABEL_DETECTION (int): Run label detection. + TEXT_DETECTION (int): Run OCR. + DOCUMENT_TEXT_DETECTION (int): Run dense text document OCR. Takes precedence when both + DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present. + SAFE_SEARCH_DETECTION (int): Run computer vision models to compute image safe-search properties. + IMAGE_PROPERTIES (int): Compute a set of image properties, such as the image's dominant colors. + CROP_HINTS (int): Run crop hints. + WEB_DETECTION (int): Run web detection. + """ + TYPE_UNSPECIFIED = 0 + FACE_DETECTION = 1 + LANDMARK_DETECTION = 2 + LOGO_DETECTION = 3 + LABEL_DETECTION = 4 + TEXT_DETECTION = 5 + DOCUMENT_TEXT_DETECTION = 11 + SAFE_SEARCH_DETECTION = 6 + IMAGE_PROPERTIES = 7 + CROP_HINTS = 9 + WEB_DETECTION = 10 + + +class FaceAnnotation(object): + class Landmark(object): + class Type(object): + """ + Face landmark (feature) type. + Left and right are defined from the vantage of the viewer of the image + without considering mirror projections typical of photos. So, ``LEFT_EYE``, + typically, is the person's right eye. + + Attributes: + UNKNOWN_LANDMARK (int): Unknown face landmark detected. Should not be filled. 
+ LEFT_EYE (int): Left eye. + RIGHT_EYE (int): Right eye. + LEFT_OF_LEFT_EYEBROW (int): Left of left eyebrow. + RIGHT_OF_LEFT_EYEBROW (int): Right of left eyebrow. + LEFT_OF_RIGHT_EYEBROW (int): Left of right eyebrow. + RIGHT_OF_RIGHT_EYEBROW (int): Right of right eyebrow. + MIDPOINT_BETWEEN_EYES (int): Midpoint between eyes. + NOSE_TIP (int): Nose tip. + UPPER_LIP (int): Upper lip. + LOWER_LIP (int): Lower lip. + MOUTH_LEFT (int): Mouth left. + MOUTH_RIGHT (int): Mouth right. + MOUTH_CENTER (int): Mouth center. + NOSE_BOTTOM_RIGHT (int): Nose, bottom right. + NOSE_BOTTOM_LEFT (int): Nose, bottom left. + NOSE_BOTTOM_CENTER (int): Nose, bottom center. + LEFT_EYE_TOP_BOUNDARY (int): Left eye, top boundary. + LEFT_EYE_RIGHT_CORNER (int): Left eye, right corner. + LEFT_EYE_BOTTOM_BOUNDARY (int): Left eye, bottom boundary. + LEFT_EYE_LEFT_CORNER (int): Left eye, left corner. + RIGHT_EYE_TOP_BOUNDARY (int): Right eye, top boundary. + RIGHT_EYE_RIGHT_CORNER (int): Right eye, right corner. + RIGHT_EYE_BOTTOM_BOUNDARY (int): Right eye, bottom boundary. + RIGHT_EYE_LEFT_CORNER (int): Right eye, left corner. + LEFT_EYEBROW_UPPER_MIDPOINT (int): Left eyebrow, upper midpoint. + RIGHT_EYEBROW_UPPER_MIDPOINT (int): Right eyebrow, upper midpoint. + LEFT_EAR_TRAGION (int): Left ear tragion. + RIGHT_EAR_TRAGION (int): Right ear tragion. + LEFT_EYE_PUPIL (int): Left eye pupil. + RIGHT_EYE_PUPIL (int): Right eye pupil. + FOREHEAD_GLABELLA (int): Forehead glabella. + CHIN_GNATHION (int): Chin gnathion. + CHIN_LEFT_GONION (int): Chin left gonion. + CHIN_RIGHT_GONION (int): Chin right gonion. 
+ """ + UNKNOWN_LANDMARK = 0 + LEFT_EYE = 1 + RIGHT_EYE = 2 + LEFT_OF_LEFT_EYEBROW = 3 + RIGHT_OF_LEFT_EYEBROW = 4 + LEFT_OF_RIGHT_EYEBROW = 5 + RIGHT_OF_RIGHT_EYEBROW = 6 + MIDPOINT_BETWEEN_EYES = 7 + NOSE_TIP = 8 + UPPER_LIP = 9 + LOWER_LIP = 10 + MOUTH_LEFT = 11 + MOUTH_RIGHT = 12 + MOUTH_CENTER = 13 + NOSE_BOTTOM_RIGHT = 14 + NOSE_BOTTOM_LEFT = 15 + NOSE_BOTTOM_CENTER = 16 + LEFT_EYE_TOP_BOUNDARY = 17 + LEFT_EYE_RIGHT_CORNER = 18 + LEFT_EYE_BOTTOM_BOUNDARY = 19 + LEFT_EYE_LEFT_CORNER = 20 + RIGHT_EYE_TOP_BOUNDARY = 21 + RIGHT_EYE_RIGHT_CORNER = 22 + RIGHT_EYE_BOTTOM_BOUNDARY = 23 + RIGHT_EYE_LEFT_CORNER = 24 + LEFT_EYEBROW_UPPER_MIDPOINT = 25 + RIGHT_EYEBROW_UPPER_MIDPOINT = 26 + LEFT_EAR_TRAGION = 27 + RIGHT_EAR_TRAGION = 28 + LEFT_EYE_PUPIL = 29 + RIGHT_EYE_PUPIL = 30 + FOREHEAD_GLABELLA = 31 + CHIN_GNATHION = 32 + CHIN_LEFT_GONION = 33 + CHIN_RIGHT_GONION = 34 diff --git a/vision/google/cloud/gapic/vision/v1/image_annotator_client.py b/vision/google/cloud/gapic/vision/v1/image_annotator_client.py new file mode 100644 index 0000000000000..8d5a37c4b886e --- /dev/null +++ b/vision/google/cloud/gapic/vision/v1/image_annotator_client.py @@ -0,0 +1,180 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# EDITING INSTRUCTIONS +# This file was generated from the file +# https://github.com/google/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto, +# and updates to that file get reflected here through a refresh process. +# For the short term, the refresh process will only be runnable by Google engineers. +# +# The only allowed edits are to method and file documentation. A 3-way +# merge preserves those additions if the generated source changes. +"""Accesses the google.cloud.vision.v1 ImageAnnotator API.""" + +import collections +import json +import os +import pkg_resources +import platform + +from google.gax import api_callable +from google.gax import config +from google.gax import path_template +import google.gax + +from google.cloud.gapic.vision.v1 import enums +from google.cloud.proto.vision.v1 import image_annotator_pb2 + + +class ImageAnnotatorClient(object): + """ + Service that performs Google Cloud Vision API detection tasks over client + images, such as face, landmark, logo, label, and text detection. The + ImageAnnotator service returns detected entities from the images. + """ + + SERVICE_ADDRESS = 'vision.googleapis.com' + """The default address of the service.""" + + DEFAULT_SERVICE_PORT = 443 + """The default port of the service.""" + + # The scopes needed to make gRPC calls to all of the methods defined in + # this service + _ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', ) + + def __init__(self, + service_path=SERVICE_ADDRESS, + port=DEFAULT_SERVICE_PORT, + channel=None, + credentials=None, + ssl_credentials=None, + scopes=None, + client_config=None, + app_name=None, + app_version='', + lib_name=None, + lib_version='', + metrics_headers=()): + """Constructor. + + Args: + service_path (string): The domain name of the API remote host. + port (int): The port on which to connect to the remote host. + channel (:class:`grpc.Channel`): A ``Channel`` instance through + which to make calls. 
+ credentials (object): The authorization credentials to attach to + requests. These credentials identify this application to the + service. + ssl_credentials (:class:`grpc.ChannelCredentials`): A + ``ChannelCredentials`` instance for use with an SSL-enabled + channel. + scopes (list[string]): A list of OAuth2 scopes to attach to requests. + client_config (dict): + A dictionary for call options for each method. See + :func:`google.gax.construct_settings` for the structure of + this data. Falls back to the default config if not specified + or the specified config is missing data points. + app_name (string): The name of the application calling + the service. Recommended for analytics purposes. + app_version (string): The version of the application calling + the service. Recommended for analytics purposes. + lib_name (string): The API library software used for calling + the service. (Unless you are writing an API client itself, + leave this as default.) + lib_version (string): The API library software version used + for calling the service. (Unless you are writing an API client + itself, leave this as default.) + metrics_headers (dict): A dictionary of values for tracking + client library metrics. Ultimately serializes to a string + (e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be + considered private. + + Returns: + A ImageAnnotatorClient object. + """ + # Unless the calling application specifically requested + # OAuth scopes, request everything. + if scopes is None: + scopes = self._ALL_SCOPES + + # Initialize an empty client config, if none is set. + if client_config is None: + client_config = {} + + # Initialize metrics_headers as an ordered dictionary + # (cuts down on cardinality of the resulting string slightly). + metrics_headers = collections.OrderedDict(metrics_headers) + metrics_headers['gl-python'] = platform.python_version() + + # The library may or may not be set, depending on what is + # calling this client. 
Newer client libraries set the library name + # and version. + if lib_name: + metrics_headers[lib_name] = lib_version + + # Finally, track the GAPIC package version. + metrics_headers['gapic'] = pkg_resources.get_distribution( + 'google-cloud-vision', ).version + + # Load the configuration defaults. + default_client_config = json.loads( + pkg_resources.resource_string( + __name__, 'image_annotator_client_config.json').decode()) + defaults = api_callable.construct_settings( + 'google.cloud.vision.v1.ImageAnnotator', + default_client_config, + client_config, + config.STATUS_CODE_NAMES, + metrics_headers=metrics_headers, ) + self.image_annotator_stub = config.create_stub( + image_annotator_pb2.ImageAnnotatorStub, + channel=channel, + service_path=service_path, + service_port=port, + credentials=credentials, + scopes=scopes, + ssl_credentials=ssl_credentials) + + self._batch_annotate_images = api_callable.create_api_call( + self.image_annotator_stub.BatchAnnotateImages, + settings=defaults['batch_annotate_images']) + + # Service calls + def batch_annotate_images(self, requests, options=None): + """ + Run image detection and annotation for a batch of images. + + Example: + >>> from google.cloud.gapic.vision.v1 import image_annotator_client + >>> client = image_annotator_client.ImageAnnotatorClient() + >>> requests = [] + >>> response = client.batch_annotate_images(requests) + + Args: + requests (list[:class:`google.cloud.proto.vision.v1.image_annotator_pb2.AnnotateImageRequest`]): Individual image annotation requests for this batch. + options (:class:`google.gax.CallOptions`): Overrides the default + settings for this call, e.g, timeout, retries etc. + + Returns: + A :class:`google.cloud.proto.vision.v1.image_annotator_pb2.BatchAnnotateImagesResponse` instance. + + Raises: + :exc:`google.gax.errors.GaxError` if the RPC is aborted. + :exc:`ValueError` if the parameters are invalid. + """ + # Create the request object. 
+ request = image_annotator_pb2.BatchAnnotateImagesRequest( + requests=requests) + return self._batch_annotate_images(request, options) diff --git a/vision/google/cloud/gapic/vision/v1/image_annotator_client_config.json b/vision/google/cloud/gapic/vision/v1/image_annotator_client_config.json new file mode 100644 index 0000000000000..b7b8b93a7521a --- /dev/null +++ b/vision/google/cloud/gapic/vision/v1/image_annotator_client_config.json @@ -0,0 +1,33 @@ +{ + "interfaces": { + "google.cloud.vision.v1.ImageAnnotator": { + "retry_codes": { + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ], + "non_idempotent": [ + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "BatchAnnotateImages": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + } + } + } + } +} diff --git a/vision/google/cloud/proto/__init__.py b/vision/google/cloud/proto/__init__.py new file mode 100644 index 0000000000000..de40ea7ca058e --- /dev/null +++ b/vision/google/cloud/proto/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/vision/google/cloud/proto/vision/__init__.py b/vision/google/cloud/proto/vision/__init__.py new file mode 100644 index 0000000000000..de40ea7ca058e --- /dev/null +++ b/vision/google/cloud/proto/vision/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/vision/google/cloud/proto/vision/v1/__init__.py b/vision/google/cloud/proto/vision/v1/__init__.py new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/vision/google/cloud/proto/vision/v1/__init__.py @@ -0,0 +1 @@ + diff --git a/vision/google/cloud/proto/vision/v1/geometry_pb2.py 
b/vision/google/cloud/proto/vision/v1/geometry_pb2.py new file mode 100644 index 0000000000000..936e4ec361aa7 --- /dev/null +++ b/vision/google/cloud/proto/vision/v1/geometry_pb2.py @@ -0,0 +1,181 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/cloud/proto/vision/v1/geometry.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/cloud/proto/vision/v1/geometry.proto', + package='google.cloud.vision.v1', + syntax='proto3', + serialized_pb=_b('\n+google/cloud/proto/vision/v1/geometry.proto\x12\x16google.cloud.vision.v1\"\x1e\n\x06Vertex\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\"@\n\x0c\x42oundingPoly\x12\x30\n\x08vertices\x18\x01 \x03(\x0b\x32\x1e.google.cloud.vision.v1.Vertex\"+\n\x08Position\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\x42n\n\x1a\x63om.google.cloud.vision.v1B\rGeometryProtoP\x01Z\n\x10\x66\x64_bounding_poly\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12\x42\n\tlandmarks\x18\x03 \x03(\x0b\x32/.google.cloud.vision.v1.FaceAnnotation.Landmark\x12\x12\n\nroll_angle\x18\x04 \x01(\x02\x12\x11\n\tpan_angle\x18\x05 \x01(\x02\x12\x12\n\ntilt_angle\x18\x06 \x01(\x02\x12\x1c\n\x14\x64\x65tection_confidence\x18\x07 \x01(\x02\x12\x1e\n\x16landmarking_confidence\x18\x08 \x01(\x02\x12:\n\x0ejoy_likelihood\x18\t \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12=\n\x11sorrow_likelihood\x18\n \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12<\n\x10\x61nger_likelihood\x18\x0b 
\x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12?\n\x13surprise_likelihood\x18\x0c \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12\x44\n\x18under_exposed_likelihood\x18\r \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12>\n\x12\x62lurred_likelihood\x18\x0e \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12?\n\x13headwear_likelihood\x18\x0f \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x1a\xb9\x07\n\x08Landmark\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32\x34.google.cloud.vision.v1.FaceAnnotation.Landmark.Type\x12\x32\n\x08position\x18\x04 \x01(\x0b\x32 .google.cloud.vision.v1.Position\"\xb4\x06\n\x04Type\x12\x14\n\x10UNKNOWN_LANDMARK\x10\x00\x12\x0c\n\x08LEFT_EYE\x10\x01\x12\r\n\tRIGHT_EYE\x10\x02\x12\x18\n\x14LEFT_OF_LEFT_EYEBROW\x10\x03\x12\x19\n\x15RIGHT_OF_LEFT_EYEBROW\x10\x04\x12\x19\n\x15LEFT_OF_RIGHT_EYEBROW\x10\x05\x12\x1a\n\x16RIGHT_OF_RIGHT_EYEBROW\x10\x06\x12\x19\n\x15MIDPOINT_BETWEEN_EYES\x10\x07\x12\x0c\n\x08NOSE_TIP\x10\x08\x12\r\n\tUPPER_LIP\x10\t\x12\r\n\tLOWER_LIP\x10\n\x12\x0e\n\nMOUTH_LEFT\x10\x0b\x12\x0f\n\x0bMOUTH_RIGHT\x10\x0c\x12\x10\n\x0cMOUTH_CENTER\x10\r\x12\x15\n\x11NOSE_BOTTOM_RIGHT\x10\x0e\x12\x14\n\x10NOSE_BOTTOM_LEFT\x10\x0f\x12\x16\n\x12NOSE_BOTTOM_CENTER\x10\x10\x12\x19\n\x15LEFT_EYE_TOP_BOUNDARY\x10\x11\x12\x19\n\x15LEFT_EYE_RIGHT_CORNER\x10\x12\x12\x1c\n\x18LEFT_EYE_BOTTOM_BOUNDARY\x10\x13\x12\x18\n\x14LEFT_EYE_LEFT_CORNER\x10\x14\x12\x1a\n\x16RIGHT_EYE_TOP_BOUNDARY\x10\x15\x12\x1a\n\x16RIGHT_EYE_RIGHT_CORNER\x10\x16\x12\x1d\n\x19RIGHT_EYE_BOTTOM_BOUNDARY\x10\x17\x12\x19\n\x15RIGHT_EYE_LEFT_CORNER\x10\x18\x12\x1f\n\x1bLEFT_EYEBROW_UPPER_MIDPOINT\x10\x19\x12 \n\x1cRIGHT_EYEBROW_UPPER_MIDPOINT\x10\x1a\x12\x14\n\x10LEFT_EAR_TRAGION\x10\x1b\x12\x15\n\x11RIGHT_EAR_TRAGION\x10\x1c\x12\x12\n\x0eLEFT_EYE_PUPIL\x10\x1d\x12\x13\n\x0fRIGHT_EYE_PUPIL\x10\x1e\x12\x15\n\x11\x46OREHEAD_GLABELLA\x10\x1f\x12\x11\n\rCHIN_GNATHION\x10 
\x12\x14\n\x10\x43HIN_LEFT_GONION\x10!\x12\x15\n\x11\x43HIN_RIGHT_GONION\x10\"\"4\n\x0cLocationInfo\x12$\n\x07lat_lng\x18\x01 \x01(\x0b\x32\x13.google.type.LatLng\"\'\n\x08Property\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xa7\x02\n\x10\x45ntityAnnotation\x12\x0b\n\x03mid\x18\x01 \x01(\t\x12\x0e\n\x06locale\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\r\n\x05score\x18\x04 \x01(\x02\x12\x12\n\nconfidence\x18\x05 \x01(\x02\x12\x12\n\ntopicality\x18\x06 \x01(\x02\x12;\n\rbounding_poly\x18\x07 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12\x37\n\tlocations\x18\x08 \x03(\x0b\x32$.google.cloud.vision.v1.LocationInfo\x12\x34\n\nproperties\x18\t \x03(\x0b\x32 .google.cloud.vision.v1.Property\"\xe7\x01\n\x14SafeSearchAnnotation\x12\x31\n\x05\x61\x64ult\x18\x01 \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12\x31\n\x05spoof\x18\x02 \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12\x33\n\x07medical\x18\x03 \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12\x34\n\x08violence\x18\x04 \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\"a\n\x0bLatLongRect\x12(\n\x0bmin_lat_lng\x18\x01 \x01(\x0b\x32\x13.google.type.LatLng\x12(\n\x0bmax_lat_lng\x18\x02 \x01(\x0b\x32\x13.google.type.LatLng\"U\n\tColorInfo\x12!\n\x05\x63olor\x18\x01 \x01(\x0b\x32\x12.google.type.Color\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x16\n\x0epixel_fraction\x18\x03 \x01(\x02\"M\n\x18\x44ominantColorsAnnotation\x12\x31\n\x06\x63olors\x18\x01 \x03(\x0b\x32!.google.cloud.vision.v1.ColorInfo\"\\\n\x0fImageProperties\x12I\n\x0f\x64ominant_colors\x18\x01 \x01(\x0b\x32\x30.google.cloud.vision.v1.DominantColorsAnnotation\"x\n\x08\x43ropHint\x12;\n\rbounding_poly\x18\x01 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x1b\n\x13importance_fraction\x18\x03 \x01(\x02\"K\n\x13\x43ropHintsAnnotation\x12\x34\n\ncrop_hints\x18\x01 \x03(\x0b\x32 
.google.cloud.vision.v1.CropHint\"(\n\x0f\x43ropHintsParams\x12\x15\n\raspect_ratios\x18\x01 \x03(\x02\"\xa6\x01\n\x0cImageContext\x12:\n\rlat_long_rect\x18\x01 \x01(\x0b\x32#.google.cloud.vision.v1.LatLongRect\x12\x16\n\x0elanguage_hints\x18\x02 \x03(\t\x12\x42\n\x11\x63rop_hints_params\x18\x04 \x01(\x0b\x32\'.google.cloud.vision.v1.CropHintsParams\"\xb4\x01\n\x14\x41nnotateImageRequest\x12,\n\x05image\x18\x01 \x01(\x0b\x32\x1d.google.cloud.vision.v1.Image\x12\x31\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0b\x32\x1f.google.cloud.vision.v1.Feature\x12;\n\rimage_context\x18\x03 \x01(\x0b\x32$.google.cloud.vision.v1.ImageContext\"\xfc\x05\n\x15\x41nnotateImageResponse\x12@\n\x10\x66\x61\x63\x65_annotations\x18\x01 \x03(\x0b\x32&.google.cloud.vision.v1.FaceAnnotation\x12\x46\n\x14landmark_annotations\x18\x02 \x03(\x0b\x32(.google.cloud.vision.v1.EntityAnnotation\x12\x42\n\x10logo_annotations\x18\x03 \x03(\x0b\x32(.google.cloud.vision.v1.EntityAnnotation\x12\x43\n\x11label_annotations\x18\x04 \x03(\x0b\x32(.google.cloud.vision.v1.EntityAnnotation\x12\x42\n\x10text_annotations\x18\x05 \x03(\x0b\x32(.google.cloud.vision.v1.EntityAnnotation\x12\x44\n\x14\x66ull_text_annotation\x18\x0c \x01(\x0b\x32&.google.cloud.vision.v1.TextAnnotation\x12L\n\x16safe_search_annotation\x18\x06 \x01(\x0b\x32,.google.cloud.vision.v1.SafeSearchAnnotation\x12L\n\x1bimage_properties_annotation\x18\x08 \x01(\x0b\x32\'.google.cloud.vision.v1.ImageProperties\x12J\n\x15\x63rop_hints_annotation\x18\x0b \x01(\x0b\x32+.google.cloud.vision.v1.CropHintsAnnotation\x12;\n\rweb_detection\x18\r \x01(\x0b\x32$.google.cloud.vision.v1.WebDetection\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status\"\\\n\x1a\x42\x61tchAnnotateImagesRequest\x12>\n\x08requests\x18\x01 \x03(\x0b\x32,.google.cloud.vision.v1.AnnotateImageRequest\"_\n\x1b\x42\x61tchAnnotateImagesResponse\x12@\n\tresponses\x18\x01 
\x03(\x0b\x32-.google.cloud.vision.v1.AnnotateImageResponse*e\n\nLikelihood\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xb1\x01\n\x0eImageAnnotator\x12\x9e\x01\n\x13\x42\x61tchAnnotateImages\x12\x32.google.cloud.vision.v1.BatchAnnotateImagesRequest\x1a\x33.google.cloud.vision.v1.BatchAnnotateImagesResponse\"\x1e\x82\xd3\xe4\x93\x02\x18\"\x13/v1/images:annotate:\x01*Bt\n\x1a\x63om.google.cloud.vision.v1B\x13ImageAnnotatorProtoP\x01Z=0.15.0.""" + """Service that performs Google Cloud Vision API detection tasks over client + images, such as face, landmark, logo, label, and text detection. The + ImageAnnotator service returns detected entities from the images. + """ + def BatchAnnotateImages(self, request, context): + """Run image detection and annotation for a batch of images. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + + class BetaImageAnnotatorStub(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """Service that performs Google Cloud Vision API detection tasks over client + images, such as face, landmark, logo, label, and text detection. The + ImageAnnotator service returns detected entities from the images. + """ + def BatchAnnotateImages(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Run image detection and annotation for a batch of images. + """ + raise NotImplementedError() + BatchAnnotateImages.future = None + + + def beta_create_ImageAnnotator_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + """The Beta API is deprecated for 0.15.0 and later. 
+ + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_deserializers = { + ('google.cloud.vision.v1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.FromString, + } + response_serializers = { + ('google.cloud.vision.v1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.SerializeToString, + } + method_implementations = { + ('google.cloud.vision.v1.ImageAnnotator', 'BatchAnnotateImages'): face_utilities.unary_unary_inline(servicer.BatchAnnotateImages), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + + def beta_create_ImageAnnotator_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. 
This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_serializers = { + ('google.cloud.vision.v1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.SerializeToString, + } + response_deserializers = { + ('google.cloud.vision.v1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.FromString, + } + cardinalities = { + 'BatchAnnotateImages': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.cloud.vision.v1.ImageAnnotator', cardinalities, options=stub_options) +except ImportError: + pass +# @@protoc_insertion_point(module_scope) diff --git a/vision/google/cloud/proto/vision/v1/image_annotator_pb2_grpc.py b/vision/google/cloud/proto/vision/v1/image_annotator_pb2_grpc.py new file mode 100644 index 0000000000000..ee72b1f7374b2 --- /dev/null +++ b/vision/google/cloud/proto/vision/v1/image_annotator_pb2_grpc.py @@ -0,0 +1,52 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +import grpc +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +import google.cloud.proto.vision.v1.image_annotator_pb2 as google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_image__annotator__pb2 + + +class ImageAnnotatorStub(object): + """Service that performs Google Cloud Vision API detection tasks over client + images, such as face, landmark, logo, label, and text detection. The + ImageAnnotator service returns detected entities from the images. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.BatchAnnotateImages = channel.unary_unary( + '/google.cloud.vision.v1.ImageAnnotator/BatchAnnotateImages', + request_serializer=google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_image__annotator__pb2.BatchAnnotateImagesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_image__annotator__pb2.BatchAnnotateImagesResponse.FromString, + ) + + +class ImageAnnotatorServicer(object): + """Service that performs Google Cloud Vision API detection tasks over client + images, such as face, landmark, logo, label, and text detection. The + ImageAnnotator service returns detected entities from the images. + """ + + def BatchAnnotateImages(self, request, context): + """Run image detection and annotation for a batch of images. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_ImageAnnotatorServicer_to_server(servicer, server): + rpc_method_handlers = { + 'BatchAnnotateImages': grpc.unary_unary_rpc_method_handler( + servicer.BatchAnnotateImages, + request_deserializer=google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_image__annotator__pb2.BatchAnnotateImagesRequest.FromString, + response_serializer=google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_image__annotator__pb2.BatchAnnotateImagesResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.cloud.vision.v1.ImageAnnotator', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/vision/google/cloud/proto/vision/v1/text_annotation_pb2.py b/vision/google/cloud/proto/vision/v1/text_annotation_pb2.py new file mode 100644 index 0000000000000..0dada8f4664a2 --- /dev/null +++ b/vision/google/cloud/proto/vision/v1/text_annotation_pb2.py @@ -0,0 +1,606 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/cloud/proto/vision/v1/text_annotation.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.cloud.proto.vision.v1 import geometry_pb2 as google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_geometry__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/cloud/proto/vision/v1/text_annotation.proto', + package='google.cloud.vision.v1', + syntax='proto3', + serialized_pb=_b('\n2google/cloud/proto/vision/v1/text_annotation.proto\x12\x16google.cloud.vision.v1\x1a\x1cgoogle/api/annotations.proto\x1a+google/cloud/proto/vision/v1/geometry.proto\"\x96\x04\n\x0eTextAnnotation\x12+\n\x05pages\x18\x01 \x03(\x0b\x32\x1c.google.cloud.vision.v1.Page\x12\x0c\n\x04text\x18\x02 \x01(\t\x1a=\n\x10\x44\x65tectedLanguage\x12\x15\n\rlanguage_code\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x1a\xd5\x01\n\rDetectedBreak\x12L\n\x04type\x18\x01 \x01(\x0e\x32>.google.cloud.vision.v1.TextAnnotation.DetectedBreak.BreakType\x12\x11\n\tis_prefix\x18\x02 \x01(\x08\"c\n\tBreakType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\t\n\x05SPACE\x10\x01\x12\x0e\n\nSURE_SPACE\x10\x02\x12\x12\n\x0e\x45OL_SURE_SPACE\x10\x03\x12\n\n\x06HYPHEN\x10\x04\x12\x0e\n\nLINE_BREAK\x10\x05\x1a\xb1\x01\n\x0cTextProperty\x12S\n\x12\x64\x65tected_languages\x18\x01 \x03(\x0b\x32\x37.google.cloud.vision.v1.TextAnnotation.DetectedLanguage\x12L\n\x0e\x64\x65tected_break\x18\x02 \x01(\x0b\x32\x34.google.cloud.vision.v1.TextAnnotation.DetectedBreak\"\x9b\x01\n\x04Page\x12\x45\n\x08property\x18\x01 
\x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12\r\n\x05width\x18\x02 \x01(\x05\x12\x0e\n\x06height\x18\x03 \x01(\x05\x12-\n\x06\x62locks\x18\x04 \x03(\x0b\x32\x1d.google.cloud.vision.v1.Block\"\xd2\x02\n\x05\x42lock\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12:\n\x0c\x62ounding_box\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12\x35\n\nparagraphs\x18\x03 \x03(\x0b\x32!.google.cloud.vision.v1.Paragraph\x12;\n\nblock_type\x18\x04 \x01(\x0e\x32\'.google.cloud.vision.v1.Block.BlockType\"R\n\tBlockType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04TEXT\x10\x01\x12\t\n\x05TABLE\x10\x02\x12\x0b\n\x07PICTURE\x10\x03\x12\t\n\x05RULER\x10\x04\x12\x0b\n\x07\x42\x41RCODE\x10\x05\"\xbb\x01\n\tParagraph\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12:\n\x0c\x62ounding_box\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12+\n\x05words\x18\x03 \x03(\x0b\x32\x1c.google.cloud.vision.v1.Word\"\xba\x01\n\x04Word\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12:\n\x0c\x62ounding_box\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12/\n\x07symbols\x18\x03 \x03(\x0b\x32\x1e.google.cloud.vision.v1.Symbol\"\x99\x01\n\x06Symbol\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12:\n\x0c\x62ounding_box\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12\x0c\n\x04text\x18\x03 \x01(\tBt\n\x1a\x63om.google.cloud.vision.v1B\x13TextAnnotationProtoP\x01Z 0: - return cls([ColorInformation.from_pb(color) for color in colors]) - - @property - def colors(self): - """Colors in an image. - - :rtype: list of :class:`~google.cloud.vision.color.ColorInformation` - :returns: Populated list of ``ColorInformation``. - """ - return self._colors - - -class Color(object): - """Representation of RGBA color information. 
- - :type red: float - :param red: The amount of red in the color as a value in the interval - [0.0, 255.0]. - - :type green: float - :param green: The amount of green in the color as a value in the interval - [0.0, 255.0]. - - :type blue: float - :param blue: The amount of blue in the color as a value in the interval - [0.0, 255.0]. - - :type alpha: float - :param alpha: The fraction of this color that should be applied to the - pixel. - """ - def __init__(self, red, green, blue, alpha): - self._red = red - self._green = green - self._blue = blue - self._alpha = alpha - - @classmethod - def from_api_repr(cls, response): - """Factory: construct a ``Color`` from a Vision API response. - - :type response: dict - :param response: Color from API Response. - - :rtype: :class:`~google.cloud.vision.color.Color` - :returns: Instance of :class:`~google.cloud.vision.color.Color`. - """ - red = float(response.get('red', 0.0)) - green = float(response.get('green', 0.0)) - blue = float(response.get('blue', 0.0)) - alpha = response.get('alpha', 0.0) - - return cls(red, green, blue, alpha) - - @classmethod - def from_pb(cls, color): - """Factory: construct a ``Color`` from a protobuf response. - - :type color: :module: `google.type.color_pb2` - :param color: ``Color`` from API Response. - - :rtype: :class:`~google.cloud.vision.color.Color` - :returns: Instance of :class:`~google.cloud.vision.color.Color`. - """ - return cls(color.red, color.green, color.blue, color.alpha.value) - - @property - def red(self): - """Red component of the color. - - :rtype: int - :returns: Red RGB value. - """ - return self._red - - @property - def green(self): - """Green component of the color. - - :rtype: int - :returns: Green RGB value. - """ - return self._green - - @property - def blue(self): - """Blue component of the color. - - :rtype: int - :returns: Blue RGB value. - """ - return self._blue - - @property - def alpha(self): - """Alpha transparency level. 
- - :rtype: float - :returns: Alpha transparency level. - """ - return self._alpha - - -class ColorInformation(object): - """Representation of color information from API response. - - :type color: :class:`~google.cloud.vision.color.Color` - :param color: RGB components of the color. - - :type score: float - :param score: Image-specific score for this color. Value in range [0, 1]. - - :type pixel_fraction: float - :param pixel_fraction: Stores the fraction of pixels the color occupies in - the image. Value in range [0, 1]. - """ - def __init__(self, color, score, pixel_fraction): - self._color = color - self._score = score - self._pixel_fraction = pixel_fraction - - @classmethod - def from_api_repr(cls, color_information): - """Factory: construct ``ColorInformation`` for a color. - - :type color_information: dict - :param color_information: Color data with extra meta information. - - :rtype: :class:`~google.cloud.vision.color.ColorInformation` - :returns: Instance of ``ColorInformation``. - """ - color = Color.from_api_repr(color_information.get('color', {})) - score = color_information.get('score') - pixel_fraction = color_information.get('pixelFraction') - return cls(color, score, pixel_fraction) - - @classmethod - def from_pb(cls, color_information): - """Factory: construct ``ColorInformation`` for a color. - - :type color_information: :class:`~google.cloud.proto.vision.v1.\ - image_annotator_pb2.ColorInfo` - :param color_information: Color data with extra meta information. - - :rtype: :class:`~google.cloud.vision.color.ColorInformation` - :returns: Instance of ``ColorInformation``. - """ - color = Color.from_pb(color_information.color) - score = color_information.score - pixel_fraction = color_information.pixel_fraction - return cls(color, score, pixel_fraction) - - @property - def color(self): - """RGB components of the color. - - :rtype: :class:`~google.vision.color.Color` - :returns: Instance of ``Color``. 
- """ - return self._color - - @property - def score(self): - """Image-specific score for this color. Value in range [0, 1]. - - :rtype: float - :returns: Image score for this color. - """ - return self._score - - @property - def pixel_fraction(self): - """Stores the fraction of pixels the color occupies in the image. - - :rtype: float - :returns: Pixel fraction value in range [0, 1]. - """ - return self._pixel_fraction diff --git a/vision/google/cloud/vision/crop_hint.py b/vision/google/cloud/vision/crop_hint.py deleted file mode 100644 index 4d04fbb9b0752..0000000000000 --- a/vision/google/cloud/vision/crop_hint.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Representation of Vision API's crop hints.""" - -from google.cloud.vision.geometry import Bounds - - -class CropHint(object): - """Representation of a crop hint returned from the Vision API. - - :type bounds: dict - :param bounds: Dictionary of boundary information of detected entity. - - :type confidence: float - :param confidence: Confidence of this being a salient region. - - :type importance_fraction: float - :param importance_fraction: Fraction of importance of this region. 
- """ - def __init__(self, bounds, confidence, importance_fraction): - self._bounds = bounds - self._confidence = confidence - self._importance_fraction = importance_fraction - - @classmethod - def from_api_repr(cls, response): - """Factory: construct ``CropHint`` from Vision API response. - - :type response: dict - :param response: Dictionary response from Vision API with entity data. - - :rtype: :class:`~google.cloud.vision.crop_hint.CropHint` - :returns: Instance of ``CropHint``. - """ - bounds = Bounds.from_api_repr(response.get('boundingPoly')) - confidence = response.get('confidence', 0.0) - importance_fraction = response.get('importanceFraction', 0.0) - return cls(bounds, confidence, importance_fraction) - - @classmethod - def from_pb(cls, response): - """Factory: construct ``CropHint`` from Vision gRPC response. - - :type response: :class:`google.cloud.proto.vision.v1.\ - image_annotator_pb2.CropHint` - :param response: gRPC response from Vision API with entity data. - - :rtype: :class:`~google.cloud.vision.crop_hint.CropHint` - :returns: Instance of ``CropHint``. - """ - bounds = Bounds.from_pb(response.bounding_poly) - return cls(bounds, response.confidence, response.importance_fraction) - - @property - def bounds(self): - """Bounding polygon of crop hints. - - :rtype: :class:`~google.cloud.vision.geometry.Bounds` - :returns: Instance of ``Bounds`` with populated vertices. - """ - return self._bounds - - @property - def confidence(self): - """Confidence of this being a salient region. Range [0, 1]. - - :rtype: float - :returns: float between 0 and 1, inclusive. - """ - return self._confidence - - @property - def importance_fraction(self): - """Fraction of importance of this salient region with respect to the - original image. 
- - :rtype: float - :returns: float - """ - return self._importance_fraction diff --git a/vision/google/cloud/vision/decorators.py b/vision/google/cloud/vision/decorators.py new file mode 100644 index 0000000000000..f6cdf498c12fb --- /dev/null +++ b/vision/google/cloud/vision/decorators.py @@ -0,0 +1,116 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +import functools +import sys + + +def add_single_feature_methods(cls): + """Custom decorator intended for :class:`~vision.helpers.VisionHelpers`. + + This metaclass adds a `{feature}` method for every feature + defined on the Feature enum. + """ + # Sanity check: This only makes sense if we are building the GAPIC + # subclass and have enums already attached. + if not hasattr(cls, 'enums'): + return cls + + # Iterate over the Feature.Type enum and add get a list of + # features which will receive single-feature detection methods. + features = [k for k in cls.enums.Feature.Type.__dict__.keys() + if k.replace('_', '').isalpha() and k.upper() == k] + + # Add each single-feature method to the class. + for feature in features: + # Sanity check: Do not make a method for the falsy feature. + if feature == 'TYPE_UNSPECIFIED': + continue + + # Assign the appropriate metadata to the function. 
+ detect = _create_single_feature_method(feature, cls.enums.Feature.Type) + + # Assign a qualified name to the function, and perform module + # replacement on the docstring. + detect.__qualname__ = '{cls}.{name}'.format( + cls=cls.__name__, + name=detect.__name__, + ) + detect.__doc__ = detect.__doc__.format( + module=cls.__module__ + '.image_annotator', + ) + + # Place the function on the class being created. + setattr(cls, detect.__name__, detect) + + # Done; return the class. + return cls + + +def _create_single_feature_method(feature, enum): + """Return a function that will detect a single feature. + + Args: + feature (str): A specific feature defined as an attribute on + :class:`~enums.Feature.Type`. + enum (class): The :class:`~enums.Feature.Type` class. + + Returns: + function: A helper function to detect just that feature. + """ + # Define the function properties. + fx_name = feature.lower() + if 'detection' in fx_name: + fx_doc = 'Perform {0}.'.format(fx_name.replace('_', ' ')) + else: + fx_doc = 'Return {desc} information.'.format( + desc=fx_name.replace('_', ' '), + ) + + # Provide a complete docstring with argument and return value + # information. + fx_doc += """ + + Args: + image (:class:`~{module}.Image`): The image to analyze. + options (:class:`google.gax.CallOptions`): Overrides the + default settings for this call, e.g, timeout, retries, etc. + + Returns: + :class:`~{module}.AnnotateImageResponse`: The API response. + """ + + # Get the actual feature value to send. + feature_value = {'type': enum.__dict__[feature]} + + # Define the function to be returned. + def inner(self, image, options=None): + """Return a single feature annotation for the given image. + + Intended for use with functools.partial, to create the particular + single-feature methods. + """ + request = { + 'image': image, + 'features': [feature_value], + } + return self.annotate_image(request, options=options) + + # Set the appropriate function metadata. 
+ inner.__name__ = fx_name + inner.__doc__ = fx_doc + + # Return the final function. + return inner diff --git a/vision/google/cloud/vision/entity.py b/vision/google/cloud/vision/entity.py deleted file mode 100644 index 5d1e402b362a0..0000000000000 --- a/vision/google/cloud/vision/entity.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Entity class for holding information returned from annotating an image.""" - - -from google.cloud.vision.geometry import Bounds -from google.cloud.vision.geometry import LocationInformation - - -class EntityAnnotation(object): - """Representation of an entity returned from the Vision API. - - :type bounds: dict - :param bounds: Dictionary of bounary information of detected entity. - - :type description: str - :param description: Description of entity detected in an image. - - :type locale: str - :param locale: The language code for the locale in which the entity textual - description (next field) is expressed. - - :type locations: list of - :class:`~google.cloud.vision.geometry.LocationInformation`. - :param locations: List of ``LocationInformation`` instances. - - :type mid: str - :param mid: Opaque entity ID. - - :type score: float - :param score: Overall score of the result. Range [0, 1]. 
- """ - def __init__(self, bounds, description, locale, locations, mid, score): - self._bounds = bounds - self._description = description - self._locale = locale - self._locations = locations - self._mid = mid - self._score = score - - @classmethod - def from_api_repr(cls, response): - """Factory: construct entity from Vision API response. - - :type response: dict - :param response: Dictionary response from Vision API with entity data. - - :rtype: :class:`~google.cloud.vision.entity.EntityAnnotation` - :returns: Instance of ``EntityAnnotation``. - """ - bounds = Bounds.from_api_repr(response.get('boundingPoly')) - description = response['description'] - locale = response.get('locale', None) - locations = [LocationInformation.from_api_repr(location) - for location in response.get('locations', ())] - mid = response.get('mid', None) - score = response.get('score', None) - - return cls(bounds, description, locale, locations, mid, score) - - @classmethod - def from_pb(cls, response): - """Factory: construct entity from Vision gRPC response. - - :type response: :class:`~google.cloud.proto.vision.v1.\ - image_annotator_pb2.AnnotateImageResponse` - :param response: gRPC response from Vision API with entity data. - - :rtype: :class:`~google.cloud.vision.entity.EntityAnnotation` - :returns: Instance of ``EntityAnnotation``. - """ - bounds = Bounds.from_pb(response.bounding_poly) - description = response.description - locale = response.locale - locations = [LocationInformation.from_pb(location) - for location in response.locations] - mid = response.mid - score = response.score - return cls(bounds, description, locale, locations, mid, score) - - @property - def bounds(self): - """Bounding polygon of detected image feature. - - :rtype: :class:`~google.cloud.vision.geometry.Bounds` - :returns: Instance of ``Bounds`` with populated vertices. - """ - return self._bounds - - @property - def description(self): - """Description of feature detected in image. 
- - :rtype: str - :returns: String description of feature detected in image. - """ - return self._description - - @property - def locale(self): - """The language code for text discovered in an image. - - :rtype: str - :returns: String language code of text found in the image. - """ - return self._locale - - @property - def locations(self): - """Location coordinates landmarks detected. - - :rtype: :class:`~google.cloud.vision.geometry.LocationInformation` - :returns: ``LocationInformation`` populated with latitude and longitude - of object detected in an image. - """ - return self._locations - - @property - def mid(self): - """MID of feature detected in image. - - :rtype: str - :returns: String MID of feature detected in image. - """ - return self._mid - - @property - def score(self): - """Overall score of the result. Range [0, 1]. - - :rtype: float - :returns: Overall score of the result. Range [0, 1]. - """ - return self._score diff --git a/vision/google/cloud/vision/face.py b/vision/google/cloud/vision/face.py deleted file mode 100644 index 36f4ed54b4ca5..0000000000000 --- a/vision/google/cloud/vision/face.py +++ /dev/null @@ -1,575 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Face class representing the Vision API's face detection response.""" - - -from enum import Enum - -from google.cloud.vision.geometry import BoundsBase -from google.cloud.vision.likelihood import _get_pb_likelihood -from google.cloud.vision.likelihood import Likelihood -from google.cloud.vision.geometry import Position - - -class Angles(object): - """Angles representing the positions of a face.""" - def __init__(self, roll, pan, tilt): - self._roll = roll - self._pan = pan - self._tilt = tilt - - @classmethod - def from_api_repr(cls, angle): - """Factory: construct the angles from an Vision API response. - - :type angle: dict - :param angle: Dictionary representation of an angle. - - :rtype: :class:`~google.cloud.vision.face.Angles` - :returns: An `Angles` instance with data parsed from `response`. - """ - roll = angle['rollAngle'] - pan = angle['panAngle'] - tilt = angle['tiltAngle'] - - return cls(roll, pan, tilt) - - @classmethod - def from_pb(cls, angle): - """Factory: convert protobuf Angle object to local Angle object. - - :type angle: :class:`~google.cloud.proto.vision.v1.\ - image_annotator_pb2.FaceAnnotation` - :param angle: Protobuf ``FaceAnnotation`` response with angle data. - - :rtype: :class:`~google.cloud.vision.face.Angles` - :returns: Instance of ``Angles``. - """ - roll = angle.roll_angle - pan = angle.pan_angle - tilt = angle.tilt_angle - - return cls(roll, pan, tilt) - - @property - def roll(self): - """Roll angle of face. - - :rtype: float - :returns: Roll angle of face in degrees. - """ - return self._roll - - @property - def pan(self): - """Pan angle of face. - - :rtype: float - :returns: Pan angle of face in degrees. - """ - return self._pan - - @property - def tilt(self): - """Tilt angle of face. - - :rtype: float - :returns: Tilt angle of face in degrees. 
- """ - return self._tilt - - -class Bounds(BoundsBase): - """The bounding polygon of the entire face.""" - - -class Emotions(object): - """Emotions displayed by the face detected in an image.""" - def __init__(self, joy_likelihood, sorrow_likelihood, - surprise_likelihood, anger_likelihood): - self._joy_likelihood = joy_likelihood - self._sorrow_likelihood = sorrow_likelihood - self._surprise_likelihood = surprise_likelihood - self._anger_likelihood = anger_likelihood - - @classmethod - def from_api_repr(cls, emotions): - """Factory: construct ``Emotions`` from Vision API response. - - :type emotions: dict - :param emotions: Response dictionary representing a face. - - :rtype: :class:`~google.cloud.vision.face.Emotions` - :returns: Populated instance of ``Emotions``. - """ - joy_likelihood = Likelihood[emotions['joyLikelihood']] - sorrow_likelihood = Likelihood[emotions['sorrowLikelihood']] - surprise_likelihood = Likelihood[emotions['surpriseLikelihood']] - anger_likelihood = Likelihood[emotions['angerLikelihood']] - - return cls(joy_likelihood, sorrow_likelihood, surprise_likelihood, - anger_likelihood) - - @classmethod - def from_pb(cls, emotions): - """Factory: construct ``Emotions`` from Vision API response. - - :type emotions: :class:`~google.cloud.proto.vision.v1.\ - image_annotator_pb2.FaceAnnotation` - :param emotions: Response dictionary representing a face with emotions. - - :rtype: :class:`~google.cloud.vision.face.Emotions` - :returns: Populated instance of ``Emotions``. - """ - joy_likelihood = _get_pb_likelihood(emotions.joy_likelihood) - sorrow_likelihood = _get_pb_likelihood(emotions.sorrow_likelihood) - surprise_likelihood = _get_pb_likelihood(emotions.surprise_likelihood) - anger_likelihood = _get_pb_likelihood(emotions.anger_likelihood) - - return cls(joy_likelihood, sorrow_likelihood, surprise_likelihood, - anger_likelihood) - - @property - def anger(self): - """Likelihood of anger in detected face. 
- - :rtype: str - :returns: String derived from - :class:`~google.cloud.vision.face.Likelihood`. - """ - return self._anger_likelihood - - @property - def joy(self): - """Likelihood of joy in detected face. - - :rtype: str - :returns: String derived from - :class:`~google.cloud.vision.face.Likelihood`. - """ - return self._joy_likelihood - - @property - def sorrow(self): - """Likelihood of sorrow in detected face. - - :rtype: str - :returns: String derived from - :class:`~google.cloud.vision.face.Likelihood`. - """ - return self._sorrow_likelihood - - @property - def surprise(self): - """Likelihood of surprise in detected face. - - :rtype: str - :returns: String derived from - :class:`~google.cloud.vision.face.Likelihood`. - """ - return self._surprise_likelihood - - -class Face(object): - """Representation of a face found by the Vision API""" - - def __init__(self, angles, bounds, detection_confidence, emotions, - fd_bounds, headwear_likelihood, image_properties, landmarks, - landmarking_confidence): - self._angles = angles - self._bounds = bounds - self._detection_confidence = detection_confidence - self._emotions = emotions - self._fd_bounds = fd_bounds - self._headwear_likelihood = headwear_likelihood - self._landmarks = landmarks - self._landmarking_confidence = landmarking_confidence - self._image_properties = image_properties - - @classmethod - def from_api_repr(cls, face): - """Factory: construct an instance of a Face from an API response - - :type face: dict - :param face: Face annotation dict returned from the Vision API. - - :rtype: :class:`~google.cloud.vision.face.Face` - :returns: A instance of `Face` with data parsed from `response`. 
- """ - face_data = { - 'angles': Angles.from_api_repr(face), - 'bounds': Bounds.from_api_repr(face['boundingPoly']), - 'detection_confidence': face['detectionConfidence'], - 'emotions': Emotions.from_api_repr(face), - 'fd_bounds': FDBounds.from_api_repr(face['fdBoundingPoly']), - 'headwear_likelihood': Likelihood[face['headwearLikelihood']], - 'image_properties': FaceImageProperties.from_api_repr(face), - 'landmarks': Landmarks.from_api_repr(face['landmarks']), - 'landmarking_confidence': face['landmarkingConfidence'], - } - return cls(**face_data) - - @classmethod - def from_pb(cls, face): - """Factory: construct an instance of a Face from an protobuf response - - :type face: :class:`~google.cloud.proto.vision.v1.\ - image_annotator_pb2.AnnotateImageResponse` - :param face: ``AnnotateImageResponse`` from gRPC call. - - :rtype: :class:`~google.cloud.vision.face.Face` - :returns: A instance of `Face` with data parsed from ``response``. - """ - face_data = { - 'angles': Angles.from_pb(face), - 'bounds': Bounds.from_pb(face.bounding_poly), - 'detection_confidence': face.detection_confidence, - 'emotions': Emotions.from_pb(face), - 'fd_bounds': FDBounds.from_pb(face.fd_bounding_poly), - 'headwear_likelihood': _get_pb_likelihood( - face.headwear_likelihood), - 'image_properties': FaceImageProperties.from_pb(face), - 'landmarks': Landmarks.from_pb(face.landmarks), - 'landmarking_confidence': face.landmarking_confidence, - } - return cls(**face_data) - - @property - def anger(self): - """Accessor to likelihood that the detected face is angry. - - :rtype: str - :returns: String derived from - :class:`~google.cloud.vision.face.Likelihood`. - """ - return self.emotions.anger - - @property - def angles(self): - """Accessor to the pan, tilt and roll angles of a Face. - - :rtype: :class:`~google.cloud.vision.face.Angles` - :returns: Pan, tilt and roll angles of the detected face. 
- """ - - return self._angles - - @property - def bounds(self): - """Accessor to the bounding poly information of the detected face. - - :rtype: :class:`~google.cloud.vision.face.Bounds` - :returns: An instance of ``Bounds`` which has a list of vertices. - """ - return self._bounds - - @property - def detection_confidence(self): - """Face detection confidence score determined by the Vision API. - - :rtype: float - :returns: Float representation of confidence ranging from 0 to 1. - """ - return self._detection_confidence - - @property - def emotions(self): - """Accessor to the possible emotions expressed in the detected face. - - :rtype: :class:`~google.cloud.vision.face.Emotions` - :returns: An instance of ``Emotions`` with joy, sorrow, anger, surprise - likelihood. - """ - return self._emotions - - @property - def fd_bounds(self): - """Accessor to the skin area bounding poly of the detected face. - - :rtype: :class:`~google.cloud.vision.image.FDBounds` - :returns: An instance of ``FDBounds`` which has a list of vertices. - """ - return self._fd_bounds - - @property - def headwear(self): - """Headwear likelihood. - - :rtype: :class:`~google.cloud.vision.face.Likelihood` - :returns: String representing the likelihood based on - :class:`~google.cloud.vision.face.Likelihood` - """ - return self._headwear_likelihood - - @property - def image_properties(self): - """Image properties from imaged used in face detection. - - :rtype: :class:`~google.cloud.vision.face.FaceImageProperties` - :returns: ``FaceImageProperties`` object with image properties. - """ - return self._image_properties - - @property - def joy(self): - """Likelihood of joy in detected face. - - :rtype: str - :returns: String derived from - :class:`~google.cloud.vision.face.Likelihood`. - """ - return self.emotions.joy - - @property - def landmarks(self): - """Accessor to the facial landmarks detected in a face. 
- - :rtype: :class:`~google.cloud.vision.face.Landmarks` - :returns: ``Landmarks`` object with facial landmarks as properies. - """ - return self._landmarks - - @property - def landmarking_confidence(self): - """Landmarking confidence score determinged by the Vision API. - - :rtype: float - :returns: Float representing the confidence of the Vision API in - determining the landmarks on a face. - """ - return self._landmarking_confidence - - @property - def sorrow(self): - """Likelihood of sorrow in detected face. - - :rtype: str - :returns: String derived from - :class:`~google.cloud.vision.face.Likelihood`. - """ - return self.emotions.sorrow - - @property - def surprise(self): - """Likelihood of surprise in detected face. - - :rtype: str - :returns: String derived from - :class:`~google.cloud.vision.face.Likelihood`. - """ - return self.emotions.surprise - - -class FaceImageProperties(object): - """A representation of the image properties from face detection.""" - def __init__(self, blurred_likelihood, underexposed_likelihood): - self._blurred_likelihood = blurred_likelihood - self._underexposed_likelihood = underexposed_likelihood - - @classmethod - def from_api_repr(cls, face): - """Factory: construct image properties from image. - - :type face: dict - :param face: Dictionary representation of a ``Face``. - - :rtype: :class:`~google.cloud.vision.face.FaceImageProperties` - :returns: Instance populated with image property data. - """ - blurred = Likelihood[face['blurredLikelihood']] - underexposed = Likelihood[face['underExposedLikelihood']] - - return cls(blurred, underexposed) - - @classmethod - def from_pb(cls, face): - """Factory: construct image properties from image. - - :type face: :class:`~google.cloud.proto.vision.v1.image_annotator_pb2.\ - FaceAnnotation` - :param face: Protobuf instace of `Face`. - - :rtype: :class:`~google.cloud.vision.face.FaceImageProperties` - :returns: Instance populated with image property data. 
- """ - blurred = _get_pb_likelihood(face.blurred_likelihood) - underexposed = _get_pb_likelihood(face.under_exposed_likelihood) - - return cls(blurred, underexposed) - - @property - def blurred(self): - """Likelihood of the image being blurred. - - :rtype: str - :returns: String representation derived from - :class:`~google.cloud.vision.face.Position`. - """ - return self._blurred_likelihood - - @property - def underexposed(self): - """Likelihood that the image used for detection was underexposed. - - :rtype: str - :returns: String representation derived from - :class:`~google.cloud.vision.face.Position`. - """ - return self._underexposed_likelihood - - -class LandmarkTypes(Enum): - """A representation of the face detection landmark types. - - See: - https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#type_1 - """ - UNKNOWN_LANDMARK = 0 - LEFT_EYE = 1 - RIGHT_EYE = 2 - LEFT_OF_LEFT_EYEBROW = 3 - RIGHT_OF_LEFT_EYEBROW = 4 - LEFT_OF_RIGHT_EYEBROW = 5 - RIGHT_OF_RIGHT_EYEBROW = 6 - MIDPOINT_BETWEEN_EYES = 7 - NOSE_TIP = 8 - UPPER_LIP = 9 - LOWER_LIP = 10 - MOUTH_LEFT = 11 - MOUTH_RIGHT = 12 - MOUTH_CENTER = 13 - NOSE_BOTTOM_RIGHT = 14 - NOSE_BOTTOM_LEFT = 15 - NOSE_BOTTOM_CENTER = 16 - LEFT_EYE_TOP_BOUNDARY = 17 - LEFT_EYE_RIGHT_CORNER = 18 - LEFT_EYE_BOTTOM_BOUNDARY = 19 - LEFT_EYE_LEFT_CORNER = 20 - RIGHT_EYE_TOP_BOUNDARY = 21 - RIGHT_EYE_RIGHT_CORNER = 22 - RIGHT_EYE_BOTTOM_BOUNDARY = 23 - RIGHT_EYE_LEFT_CORNER = 24 - LEFT_EYEBROW_UPPER_MIDPOINT = 25 - RIGHT_EYEBROW_UPPER_MIDPOINT = 26 - LEFT_EAR_TRAGION = 27 - RIGHT_EAR_TRAGION = 28 - LEFT_EYE_PUPIL = 29 - RIGHT_EYE_PUPIL = 30 - FOREHEAD_GLABELLA = 31 - CHIN_GNATHION = 32 - CHIN_LEFT_GONION = 33 - CHIN_RIGHT_GONION = 34 - - -class FDBounds(BoundsBase): - """The bounding polygon of just the skin portion of the face.""" - - -class Landmark(object): - """A face-specific landmark (for example, a face feature, left eye). 
- - :type landmark_type: :class:`~google.cloud.vision.face.LandmarkTypes` - :param landmark_type: Instance of ``LandmarkTypes``. - - :type position: :class:`~google.cloud.vision.face.Position` - :param position: - """ - def __init__(self, position, landmark_type): - self._position = position - self._landmark_type = landmark_type - - @classmethod - def from_api_repr(cls, landmark): - """Factory: construct an instance of a Landmark from a response. - - :type landmark: dict - :param landmark: Landmark representation from Vision API. - - :rtype: :class:`~google.cloud.vision.face.Landmark` - :returns: Populated instance of ``Landmark``. - """ - position = Position.from_api_repr(landmark['position']) - landmark_type = LandmarkTypes[landmark['type']] - return cls(position, landmark_type) - - @classmethod - def from_pb(cls, landmark): - """Factory: construct an instance of a Landmark from a response. - - :type landmark: :class:`~google.cloud.proto.vision.v1.\ - image_annotator_pb.FaceAnnotation.Landmark` - :param landmark: Landmark representation from Vision API. - - :rtype: :class:`~google.cloud.vision.face.Landmark` - :returns: Populated instance of ``Landmark``. - """ - position = Position.from_pb(landmark.position) - landmark_type = LandmarkTypes(landmark.type) - return cls(position, landmark_type) - - @property - def position(self): - """Landmark position on face. - - :rtype: :class:`~google.cloud.vision.face.Position` - :returns: Instance of `Position` with landmark coordinates. - """ - return self._position - - @property - def landmark_type(self): - """Landmark type of facial feature. - - :rtype: str - :returns: String representation of facial landmark type. - """ - return self._landmark_type - - -class Landmarks(object): - """Landmarks detected on a face represented as properties. - - :type landmarks: list - :param landmarks: List of :class:`~google.cloud.vision.face.Landmark`. 
- """ - def __init__(self, landmarks): - for landmark in landmarks: - setattr(self, landmark.landmark_type.name.lower(), landmark) - - @classmethod - def from_api_repr(cls, landmarks): - """Factory: construct facial landmarks from Vision API response. - - :type landmarks: dict - :param landmarks: JSON face annotation. - - :rtype: :class:`~google.cloud.vision.face.Landmarks` - :returns: Instance of ``Landmarks`` populated with facial landmarks. - """ - return cls([Landmark.from_api_repr(landmark) - for landmark in landmarks]) - - @classmethod - def from_pb(cls, landmarks): - """Factory: construct facial landmarks from Vision gRPC response. - - :type landmarks: :class:`~google.protobuf.internal.containers.\ - RepeatedCompositeFieldContainer` - :param landmarks: List of facial landmarks. - - :rtype: :class:`~google.cloud.vision.face.Landmarks` - :returns: Instance of ``Landmarks`` populated with facial landmarks. - """ - return cls([Landmark.from_pb(landmark) for landmark in landmarks]) diff --git a/vision/google/cloud/vision/feature.py b/vision/google/cloud/vision/feature.py deleted file mode 100644 index 2a2b5b2d6ef7a..0000000000000 --- a/vision/google/cloud/vision/feature.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Feature represenging various types of annotating.""" - - -class FeatureTypes(object): - """Feature Types to indication which annotations to perform. 
- - See: - https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#Type - """ - CROP_HINTS = 'CROP_HINTS' - DOCUMENT_TEXT_DETECTION = 'DOCUMENT_TEXT_DETECTION' - FACE_DETECTION = 'FACE_DETECTION' - IMAGE_PROPERTIES = 'IMAGE_PROPERTIES' - LABEL_DETECTION = 'LABEL_DETECTION' - LANDMARK_DETECTION = 'LANDMARK_DETECTION' - LOGO_DETECTION = 'LOGO_DETECTION' - SAFE_SEARCH_DETECTION = 'SAFE_SEARCH_DETECTION' - TEXT_DETECTION = 'TEXT_DETECTION' - WEB_DETECTION = 'WEB_DETECTION' - - -class Feature(object): - """Feature object specifying the annotation type and maximum results. - - :type feature_type: str - :param feature_type: String representation of - :class:`~google.cloud.vision.feature.FeatureType`. - - :type max_results: int - :param max_results: Number of results to return for the specified - feature type. - - See: - https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#Feature - """ - def __init__(self, feature_type, max_results=1): - try: - self._feature_type = getattr(FeatureTypes, feature_type) - except AttributeError: - raise AttributeError('Feature type passed in cannot be found.') - self._max_results = int(max_results) - - def as_dict(self): - """Generate dictionary for Feature request format. - - :rtype: dict - :returns: Dictionary representation of a - :class:`~google.cloud.vision.feature.FeatureType`. - """ - return { - 'type': self.feature_type, - 'maxResults': self.max_results - } - - @property - def feature_type(self): - """"Feature type string. - - :rtype: :class:`~google.cloud.vision.feature.FeatureTypes` - :returns: Instance of - :class:`~google.cloud.vision.feature.FeatureTypes` - """ - return self._feature_type - - @property - def max_results(self): - """Maximum number of results for feature type. - - :rtype: int - :returns: Maxium results to be returned. 
- """ - return self._max_results diff --git a/vision/google/cloud/vision/geometry.py b/vision/google/cloud/vision/geometry.py deleted file mode 100644 index 9779282ef2400..0000000000000 --- a/vision/google/cloud/vision/geometry.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Geometry and other generic classes used by the Vision API.""" - - -class BoundsBase(object): - """Base class for handling bounds with vertices. - - :type vertices: list of :class:`~google.cloud.vision.geometry.Vertex` - :param vertices: List of vertcies describing points on an image. - """ - def __init__(self, vertices): - self._vertices = vertices - - @classmethod - def from_api_repr(cls, vertices): - """Factory: construct BoundsBase instance from Vision API response. - - :type vertices: dict - :param vertices: List of vertices. - - :rtype: :class:`~google.cloud.vision.geometry.BoundsBase` or None - :returns: Instance of ``BoundsBase`` with populated verticies or None. - """ - if vertices is None: - return None - return cls([Vertex(vertex.get('x', None), vertex.get('y', None)) - for vertex in vertices.get('vertices', ())]) - - @classmethod - def from_pb(cls, vertices): - """Factory: construct BoundsBase instance from a protobuf response. - - :type vertices: :class:`~google.cloud.proto.vision.v1.\ - geometry_pb2.BoundingPoly` - :param vertices: List of vertices. 
- - :rtype: :class:`~google.cloud.vision.geometry.BoundsBase` or None - :returns: Instance of ``BoundsBase`` with populated verticies. - """ - return cls([Vertex(vertex.x, vertex.y) - for vertex in vertices.vertices]) - - @property - def vertices(self): - """List of vertices. - - :rtype: list of :class:`~google.cloud.vision.geometry.Vertex` - :returns: List of populated vertices. - """ - return self._vertices - - -class Bounds(BoundsBase): - """A polygon boundry of the detected feature.""" - - -class FDBounds(BoundsBase): - """The bounding polygon of just the skin portion of the face.""" - - -class LocationInformation(object): - """Representation of location information returned by the Vision API. - - :type latitude: float - :param latitude: Latitude coordinate of geographical location. - - :type longitude: float - :param longitude: Longitude coordinate of geographical location. - """ - def __init__(self, latitude, longitude): - self._latitude = latitude - self._longitude = longitude - - @classmethod - def from_api_repr(cls, location_info): - """Factory: construct location information from Vision API response. - - :type location_info: dict - :param location_info: Dictionary response of locations. - - :rtype: :class:`~google.cloud.vision.geometry.LocationInformation` - :returns: ``LocationInformation`` with populated latitude and - longitude. - """ - lat_long = location_info.get('latLng', {}) - latitude = lat_long.get('latitude') - longitude = lat_long.get('longitude') - return cls(latitude, longitude) - - @classmethod - def from_pb(cls, location_info): - """Factory: construct location information from a protobuf response. - - :type location_info: :class:`~google.cloud.vision.v1.LocationInfo` - :param location_info: Protobuf response with ``LocationInfo``. - - :rtype: :class:`~google.cloud.vision.geometry.LocationInformation` - :returns: ``LocationInformation`` with populated latitude and - longitude. 
- """ - return cls(location_info.lat_lng.latitude, - location_info.lat_lng.longitude) - - @property - def latitude(self): - """Latitude coordinate. - - :rtype: float - :returns: Latitude coordinate of location. - """ - return self._latitude - - @property - def longitude(self): - """Longitude coordinate. - - :rtype: float - :returns: Longitude coordinate of location. - """ - return self._longitude - - -class Position(object): - """A 3D position in the image. - - See: - https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#Position - - :type x_coordinate: float - :param x_coordinate: X position coordinate. - - :type y_coordinate: float - :param y_coordinate: Y position coordinate. - - :type z_coordinate: float - :param z_coordinate: Z position coordinate. - """ - def __init__(self, x_coordinate=None, y_coordinate=None, - z_coordinate=None): - self._x_coordinate = x_coordinate - self._y_coordinate = y_coordinate - self._z_coordinate = z_coordinate - - @classmethod - def from_api_repr(cls, position): - """Factory: construct 3D position from API response. - - :type position: dict - :param position: Dictionary with 3 axis position data. - - :rtype: :class:`~google.cloud.vision.geometry.Position` - :returns: ``Position`` constructed with 3D points from API response. - """ - x_coordinate = position['x'] - y_coordinate = position['y'] - z_coordinate = position['z'] - return cls(x_coordinate, y_coordinate, z_coordinate) - - @classmethod - def from_pb(cls, response_position): - """Factory: construct 3D position from API response. - - :rtype: :class:`~google.cloud.vision.geometry.Position` - :returns: ``Position`` constructed with 3D points from API response. - """ - x_coordinate = response_position.x - y_coordinate = response_position.y - z_coordinate = response_position.z - return cls(x_coordinate, y_coordinate, z_coordinate) - - @property - def x_coordinate(self): - """X position coordinate. - - :rtype: float - :returns: X position coordinate. 
- """ - return self._x_coordinate - - @property - def y_coordinate(self): - """Y position coordinate. - - :rtype: float - :returns: Y position coordinate. - """ - return self._y_coordinate - - @property - def z_coordinate(self): - """Z position coordinate. - - :rtype: float - :returns: Z position coordinate. - """ - return self._z_coordinate - - -class Vertex(object): - """A vertex represents a 2D point in the image. - - See: - https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#Vertex - - :type x_coordinate: float - :param x_coordinate: X position coordinate. - - :type y_coordinate: float - :param y_coordinate: Y position coordinate. - """ - def __init__(self, x_coordinate=None, y_coordinate=None): - self._x_coordinate = x_coordinate - self._y_coordinate = y_coordinate - - @property - def x_coordinate(self): - """X position coordinate. - - :rtype: float - :returns: X position coordinate. - """ - return self._x_coordinate - - @property - def y_coordinate(self): - """Y position coordinate. - - :rtype: float - :returns: Y position coordinate. - """ - return self._y_coordinate diff --git a/vision/google/cloud/vision/helpers.py b/vision/google/cloud/vision/helpers.py new file mode 100644 index 0000000000000..6dbe4742ab7af --- /dev/null +++ b/vision/google/cloud/vision/helpers.py @@ -0,0 +1,69 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +import collections +import six + +from google.gax.utils import protobuf + + +class VisionHelpers(object): + """A set of convenience methods to make the Vision GAPIC easier to use. + + This class should be considered abstract; it is used as a superclass + in a multiple-inheritance construction alongside the applicable GAPIC. + See the :class:`~google.cloud.vision_v1.ImageAnnotatorClient`. + """ + def annotate_image(self, request, options=None): + """Run image detection and annotation for an image. + + Example: + >>> from google.cloud.vision_v1 import ImageAnnotatorClient + >>> client = ImageAnnotatorClient() + >>> request = { + ... 'image': { + ... 'source': {'image_uri': 'https://foo.com/image.jpg'}, + ... }, + ... } + >>> response = client.annotate_image(request) + + Args: + request (:class:`~vision_v1.image_annotator.AnnotateImageRequest`) + options (:class:`google.gax.CallOptions`): Overrides the default + settings for this call, e.g, timeout, retries, etc. + + Returns: + :class:`~vision_v1.image_annotator.AnnotateImageResponse` + """ + # This method allows features not to be specified, and you get all + # of them. + protobuf.setdefault(request, 'features', self._get_all_features()) + r = self.batch_annotate_images([request], options=options) + return r.responses[0] + + def _get_all_features(self): + """Return a list of all features. + + Returns: + list: A list of all available features. + """ + answer = [] + for key, value in self.enums.Feature.Type.__dict__.items(): + if key.upper() != key: + continue + if not isinstance(value, int) or value == 0: + continue + answer.append({'type': value}) + return answer diff --git a/vision/google/cloud/vision/image.py b/vision/google/cloud/vision/image.py deleted file mode 100644 index f96103d6fcdd7..0000000000000 --- a/vision/google/cloud/vision/image.py +++ /dev/null @@ -1,304 +0,0 @@ -# Copyright 2016 Google Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Image represented by either a URI or byte stream.""" - - -from base64 import b64encode - -from google.cloud.proto.vision.v1 import image_annotator_pb2 - -from google.cloud.vision._gax import _to_gapic_image -from google.cloud._helpers import _to_bytes -from google.cloud._helpers import _bytes_to_unicode -from google.cloud.vision.feature import Feature -from google.cloud.vision.feature import FeatureTypes - - -class Image(object): - """Image representation containing information to be annotate. - - :type content: bytes - :param content: Byte stream of an image. - - :type filename: str - :param filename: Filename to image. - - :type source_uri: str - :param source_uri: URL or Google Cloud Storage URI of image. - - :type client: :class:`~google.cloud.vision.client.Client` - :param client: Instance of Vision client. - """ - - def __init__(self, client, content=None, filename=None, source_uri=None): - sources = [source for source in (content, filename, source_uri) - if source is not None] - if len(sources) != 1: - raise ValueError( - 'Specify exactly one of "content", "filename", or ' - '"source_uri".') - - self.client = client - - if filename is not None: - with open(filename, 'rb') as file_obj: - content = file_obj.read() - - if content is not None: - content = _to_bytes(content) - - self._content = content - self._source = source_uri - - def as_dict(self): - """Generate dictionary structure for request. 
- - :rtype: dict - :returns: Dictionary with source information for image. - """ - if self.content: - return { - 'content': _bytes_to_unicode(b64encode(self.content)) - } - elif self.source.startswith('gs://'): - return { - 'source': { - 'gcs_image_uri': self.source - } - } - elif self.source.startswith(('http://', 'https://')): - return { - 'source': { - 'image_uri': self.source - } - } - raise ValueError('No image content or source found.') - - @property - def content(self): - """Base64 encoded image content. - - :rtype: str - :returns: Base64 encoded image bytes. - """ - return self._content - - @property - def source(self): - """Google Cloud Storage URI. - - :rtype: str - :returns: String of Google Cloud Storage URI. - """ - return self._source - - def _detect_annotation(self, images): - """Generic method for detecting annotations. - - :type images: list - :param images: List of :class:`~google.cloud.vision.image.Image`. - - :rtype: list - :returns: List of - :class:`~google.cloud.vision.annotations.Annotations`. - """ - return self.client._vision_api.annotate(images) - - def _detect_annotation_from_pb(self, requests_pb=None): - """Helper for pre-made requests. - - :type requests_pb: list - :param requests_pb: List of :class:`google.cloud.proto.vision.v1.\ - image_annotator_pb2.AnnotateImageRequest` - - :rtype: :class:`~google.cloud.vision.annotations.Annotations` - :returns: Instance of ``Annotations``. - """ - return self.client._vision_api.annotate(self, requests_pb=requests_pb) - - def detect(self, features): - """Detect multiple feature types. - - :type features: list of :class:`~google.cloud.vision.feature.Feature` - :param features: List of the ``Feature`` indication the type of - annotation to perform. - - :rtype: list - :returns: List of - :class:`~google.cloud.vision.entity.EntityAnnotation`. 
- """ - images = ((self, features),) - return self._detect_annotation(images) - - def detect_crop_hints(self, aspect_ratios=None, limit=10): - """Detect crop hints in image. - - :type aspect_ratios: list - :param aspect_ratios: (Optional) List of floats i.e. 4/3 == 1.33333. A - maximum of 16 aspect ratios can be given. - - :type limit: int - :param limit: (Optional) The number of crop hints to detect. - - :rtype: list - :returns: List of :class:`~google.cloud.vision.crop_hint.CropHints`. - """ - feature_type = image_annotator_pb2.Feature.CROP_HINTS - feature = image_annotator_pb2.Feature(type=feature_type, - max_results=limit) - image = _to_gapic_image(self) - crop_hints_params = image_annotator_pb2.CropHintsParams( - aspect_ratios=aspect_ratios) - image_context = image_annotator_pb2.ImageContext( - crop_hints_params=crop_hints_params) - request = image_annotator_pb2.AnnotateImageRequest( - image=image, features=[feature], image_context=image_context) - - annotations = self._detect_annotation_from_pb([request]) - return annotations[0].crop_hints - - def detect_faces(self, limit=10): - """Detect faces in image. - - :type limit: int - :param limit: The number of faces to try and detect. - - :rtype: list - :returns: List of :class:`~google.cloud.vision.face.Face`. - """ - features = [Feature(FeatureTypes.FACE_DETECTION, limit)] - annotations = self.detect(features) - return annotations[0].faces - - def detect_full_text(self, language_hints=None, limit=10): - """Detect a full document's text. - - :type language_hints: list - :param language_hints: (Optional) A list of BCP-47 language codes. See: - https://cloud.google.com/vision/docs/languages - - :type limit: int - :param limit: (Optional) The number of documents to detect. - - :rtype: list - :returns: List of :class:`~google.cloud.vision.text.TextAnnotation`. 
- """ - feature_type = image_annotator_pb2.Feature.DOCUMENT_TEXT_DETECTION - feature = image_annotator_pb2.Feature(type=feature_type, - max_results=limit) - image = _to_gapic_image(self) - image_context = image_annotator_pb2.ImageContext( - language_hints=language_hints) - request = image_annotator_pb2.AnnotateImageRequest( - image=image, features=[feature], image_context=image_context) - annotations = self._detect_annotation_from_pb([request]) - return annotations[0].full_texts - - def detect_labels(self, limit=10): - """Detect labels that describe objects in an image. - - :type limit: int - :param limit: The maximum number of labels to try and detect. - - :rtype: list - :returns: List of :class:`~google.cloud.vision.entity.EntityAnnotation` - """ - features = [Feature(FeatureTypes.LABEL_DETECTION, limit)] - annotations = self.detect(features) - return annotations[0].labels - - def detect_landmarks(self, limit=10): - """Detect landmarks in an image. - - :type limit: int - :param limit: The maximum number of landmarks to find. - - :rtype: list - :returns: List of - :class:`~google.cloud.vision.entity.EntityAnnotation`. - """ - features = [Feature(FeatureTypes.LANDMARK_DETECTION, limit)] - annotations = self.detect(features) - return annotations[0].landmarks - - def detect_logos(self, limit=10): - """Detect logos in an image. - - :type limit: int - :param limit: The maximum number of logos to find. - - :rtype: list - :returns: List of - :class:`~google.cloud.vision.entity.EntityAnnotation`. - """ - features = [Feature(FeatureTypes.LOGO_DETECTION, limit)] - annotations = self.detect(features) - return annotations[0].logos - - def detect_properties(self, limit=10): - """Detect the color properties of an image. - - :type limit: int - :param limit: The maximum number of image properties to find. - - :rtype: list - :returns: List of - :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`. 
- """ - features = [Feature(FeatureTypes.IMAGE_PROPERTIES, limit)] - annotations = self.detect(features) - return annotations[0].properties - - def detect_safe_search(self, limit=10): - """Retreive safe search properties from an image. - - :type limit: int - :param limit: The number of faces to try and detect. - - :rtype: list - :returns: List of - :class:`~google.cloud.vision.sage.SafeSearchAnnotation`. - """ - features = [Feature(FeatureTypes.SAFE_SEARCH_DETECTION, limit)] - annotations = self.detect(features) - return annotations[0].safe_searches - - def detect_text(self, limit=10): - """Detect text in an image. - - :type limit: int - :param limit: The maximum instances of text to find. - - :rtype: list - :returns: List of - :class:`~google.cloud.vision.entity.EntityAnnotation`. - """ - features = [Feature(FeatureTypes.TEXT_DETECTION, limit)] - annotations = self.detect(features) - return annotations[0].texts - - def detect_web(self, limit=10): - """Detect similar images elsewhere on the web. - - :type limit: int - :param limit: The maximum instances of text to find. - - :rtype: list - :returns: List of - :class:`~google.cloud.vision.entity.EntityAnnotation`. - """ - features = [Feature(FeatureTypes.WEB_DETECTION, limit)] - annotations = self.detect(features) - return annotations[0].web diff --git a/vision/google/cloud/vision/likelihood.py b/vision/google/cloud/vision/likelihood.py deleted file mode 100644 index 6fffc66407393..0000000000000 --- a/vision/google/cloud/vision/likelihood.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Likelihood constants returned from Vision API.""" - - -from enum import Enum - -from google.cloud.proto.vision.v1 import image_annotator_pb2 - - -def _get_pb_likelihood(likelihood): - """Convert protobuf Likelihood integer value to Likelihood enum. - - :type likelihood: int - :param likelihood: Protobuf integer representing ``Likelihood``. - - :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` - :returns: Enum ``Likelihood`` converted from protobuf value. - """ - likelihood_pb = image_annotator_pb2.Likelihood.Name(likelihood) - return Likelihood[likelihood_pb] - - -class Likelihood(Enum): - """A representation of likelihood to give stable results across upgrades. - - See: - https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#likelihood - """ - UNKNOWN = 'UNKNOWN' - VERY_UNLIKELY = 'VERY_UNLIKELY' - UNLIKELY = 'UNLIKELY' - POSSIBLE = 'POSSIBLE' - LIKELY = 'LIKELY' - VERY_LIKELY = 'VERY_LIKELY' diff --git a/vision/google/cloud/vision/safe_search.py b/vision/google/cloud/vision/safe_search.py deleted file mode 100644 index d439d9ed60155..0000000000000 --- a/vision/google/cloud/vision/safe_search.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Safe search class for information returned from annotating an image.""" - -from google.cloud.vision.likelihood import _get_pb_likelihood -from google.cloud.vision.likelihood import Likelihood - - -class SafeSearchAnnotation(object): - """Representation of a SafeSearchAnnotation. - - :type adult_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood` - :param adult_likelihood: Likelihood that image contains adult material. - - :type spoof_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood` - :param spoof_likelihood: Likelihood that image is a spoof. - - :type medical_likelihood: - :class:`~google.cloud.vision.likelihood.Likelihood` - :param medical_likelihood: Likelihood that image contains medical material. - - :type violence_likelihood: - :class:`~google.cloud.vision.likelihood.Likelihood` - :param violence_likelihood: Likelihood that image contains violence. - """ - - def __init__(self, adult_likelihood, spoof_likelihood, medical_likelihood, - violence_likelihood): - self._adult_likelihood = adult_likelihood - self._spoof_likelihood = spoof_likelihood - self._medical_likeliehood = medical_likelihood - self._violence_likelihood = violence_likelihood - - @classmethod - def from_api_repr(cls, response): - """Factory: construct SafeSearchAnnotation from Vision API response. - - :type response: dict - :param response: Dictionary response from Vision API with safe search - data. - - :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation` - :returns: Instance of ``SafeSearchAnnotation``. 
- """ - adult_likelihood = Likelihood[response['adult']] - spoof_likelihood = Likelihood[response['spoof']] - medical_likelihood = Likelihood[response['medical']] - violence_likelihood = Likelihood[response['violence']] - - return cls(adult_likelihood, spoof_likelihood, medical_likelihood, - violence_likelihood) - - @classmethod - def from_pb(cls, image): - """Factory: construct SafeSearchAnnotation from Vision API response. - - :type image: :class:`~google.cloud.proto.vision.v1.\ - image_annotator_pb2.SafeSearchAnnotation` - :param image: Protobuf response from Vision API with safe search data. - - :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation` - :returns: Instance of ``SafeSearchAnnotation``. - """ - values = [image.adult, image.spoof, image.medical, image.violence] - classifications = map(_get_pb_likelihood, values) - return cls(*classifications) - - @property - def adult(self): - """Represents the adult contents likelihood for the image. - - :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` - :returns: ``Likelihood`` of the image containing adult content. - """ - return self._adult_likelihood - - @property - def spoof(self): - """The likelihood that an obvious modification was made to the image. - - :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` - :returns: The ``Likelihood`` that an obvious modification was made to - the image's canonical version to make it appear funny or - offensive. - """ - return self._spoof_likelihood - - @property - def medical(self): - """Likelihood this is a medical image. - - :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` - :returns: The ``Likelihood`` that the image is medical in origin. - """ - return self._medical_likeliehood - - @property - def violence(self): - """Likeliehood that this image contains violence. - - :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` - :returns: The ``Likelihood`` that the image contains violence. 
- """ - return self._violence_likelihood diff --git a/vision/google/cloud/vision/text.py b/vision/google/cloud/vision/text.py deleted file mode 100644 index b903c3547b4b8..0000000000000 --- a/vision/google/cloud/vision/text.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Text annotations of an image.""" - -import json - -from google.cloud.proto.vision.v1 import text_annotation_pb2 -from google.protobuf import json_format - - -class TextAnnotation(object): - """Representation of a text annotation returned from the Vision API. - - :type pages: list - :param pages: List of - :class:`~google.cloud.proto.vision.v1.\ - text_annotation_pb2.Page`. - - :type text: str - :param text: String containing text detected from the image. - """ - def __init__(self, pages, text): - self._pages = pages - self._text = text - - @classmethod - def from_api_repr(cls, annotation): - """Factory: construct an instance of ``TextAnnotation`` from JSON. - - :type annotation: dict - :param annotation: Dictionary response from Vision API. - - :rtype: :class:`~google.cloud.vision.text.TextAnnotation` - :returns: Instance of ``TextAnnotation``. 
- """ - annotation_json = json.dumps(annotation) - text_annotation = text_annotation_pb2.TextAnnotation() - json_format.Parse(annotation_json, text_annotation) - return cls(text_annotation.pages, text_annotation.text) - - @classmethod - def from_pb(cls, annotation): - """Factory: construct an instance of ``TextAnnotation`` from protobuf. - - :type annotation: :class:`~google.cloud.proto.vision.v1.\ - text_annotation_pb2.TextAnnotation` - :param annotation: Populated instance of ``TextAnnotation``. - - :rtype: :class:`~google.cloud.vision.text.TextAnnotation` - :returns: Populated instance of ``TextAnnotation``. - """ - return cls(annotation.pages, annotation.text) - - @property - def pages(self): - """Pages found in text image. - - :rtype: list - :returns: List of :class:`~google.cloud.proto.vision.v1.\ - text_annotation_pb2.Page`. - """ - return self._pages - - @property - def text(self): - """Text detected from an image. - - :rtype: str - :returns: String of text found in an image. - """ - return self._text diff --git a/vision/google/cloud/vision/web.py b/vision/google/cloud/vision/web.py deleted file mode 100644 index 01e8cb2c68992..0000000000000 --- a/vision/google/cloud/vision/web.py +++ /dev/null @@ -1,335 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Web image search.""" - - -class WebDetection(object): - """Representation of a web detection sent from the Vision API. 
- - :type web_entities: list - :param web_entities: List of - :class:`google.cloud.proto.vision.v1.\ - web_detection_pb2.WebDetection.WebEntity`. - - :type full_matching_images: list - :param full_matching_images: List of - :class:`google.cloud.proto.vision.v1.\ - web_detection_pb2.WebDetection.WebImage`. - - :type partial_matching_images: list - :param partial_matching_images: List of - :class:`google.cloud.proto.vision.v1.\ - web_detection_pb2.WebDetection.WebImage`. - - :type pages_with_matching_images: list - :param pages_with_matching_images: List of - :class:`google.cloud.proto.vision.v1.\ - web_detection_pb2.WebDetection.\ - WebPage`. - """ - def __init__(self, web_entities=(), full_matching_images=(), - partial_matching_images=(), pages_with_matching_images=()): - self._web_entities = web_entities - self._full_matching_images = full_matching_images - self._partial_matching_images = partial_matching_images - self._pages_with_matching_images = pages_with_matching_images - - @classmethod - def from_api_repr(cls, detection): - """Factory: construct ``WebDetection`` from Vision API response. - - :type detection: dict - :param detection: Dictionary representing a ``WebDetection``. - - :rtype: :class:`~google.cloud.vision.web.WebDetection` - :returns: Populated instance of ``WebDetection``. 
- """ - web_entities = detection.get('webEntities') - full_matching_images = detection.get('fullMatchingImages') - partial_matching_images = detection.get('partialMatchingImages') - pages_with_matching_images = detection.get('pagesWithMatchingImages') - - web_detection = { - 'web_entities': [WebEntity.from_api_repr(web_entity) - for web_entity in web_entities], - 'full_matching_images': [WebImage.from_api_repr(web_image) - for web_image in full_matching_images], - 'partial_matching_images': [WebImage.from_api_repr(web_image) - for web_image - in partial_matching_images], - 'pages_with_matching_images': [WebPage.from_api_repr(web_page) - for web_page - in pages_with_matching_images], - } - return cls(**web_detection) - - @classmethod - def from_pb(cls, detection): - """Factory: construct ``WebDetection`` from Vision API response. - - :type detection: :class:`~google.cloud.proto.vision.v1.\ - web_detection_pb2.WebDetection` - :param detection: Dictionary representing a ``WebDetection``. - - :rtype: :class:`~google.cloud.vision.web.WebDetection` - :returns: Populated instance of ``WebDetection``. - """ - web_entities = [WebEntity.from_pb(web_entity) - for web_entity in detection.web_entities] - full_image_matches = [WebImage.from_pb(web_image) - for web_image in detection.full_matching_images] - partial_image_matches = [WebImage.from_pb(web_image) - for web_image - in detection.partial_matching_images] - pages_with_images = [WebPage.from_pb(web_page) - for web_page - in detection.pages_with_matching_images] - return cls(web_entities, full_image_matches, partial_image_matches, - pages_with_images) - - @property - def web_entities(self): - """Return the web entities. - - :rtype: list - :returns: A list of ``WebEntity`` instances. - """ - return self._web_entities - - @property - def full_matching_images(self): - """Return the full matching images. - - :rtype: list - :returns: A list of ``WebImage`` instances. 
- """ - return self._full_matching_images - - @property - def partial_matching_images(self): - """Return the partially matching images. - - :rtype: list - :returns: A list of ``WebImage`` instances. - """ - return self._partial_matching_images - - @property - def pages_with_matching_images(self): - """Return the web pages with matching images. - - :rtype: list - :returns: A list of ``WebPage`` instances. - """ - return self._pages_with_matching_images - - -class WebEntity(object): - """Object containing a web entity sent from the Vision API. - - :type entity_id: str - :param entity_id: ID string for the entity. - - :type score: float - :param score: Overall relevancy score for the entity. - - :type description: str - :param description: Description of the entity. - """ - - def __init__(self, entity_id, score, description): - self._entity_id = entity_id - self._score = score - self._description = description - - @classmethod - def from_api_repr(cls, web_entity): - """Factory: construct ``WebImage`` from Vision API response. - - :type web_entity: dict - :param web_entity: Dictionary representing a web entity - - :rtype: :class:`~google.cloud.vision.web.WebEntity` - :returns: Populated instance of ``WebEntity``. - """ - return cls(web_entity.get('entityId'), web_entity.get('score'), - web_entity.get('description')) - - @classmethod - def from_pb(cls, web_entity): - """Factory: construct ``WebEntity`` from Vision API response. - - :type web_entity: :class:`~google.cloud.proto.vision.v1.\ - web_detection_pb2.WebDetection.WebEntity` - :param web_entity: Dictionary representing a web entity - - :rtype: :class:`~google.cloud.vision.web.WebEntity` - :returns: Populated instance of ``WebEntity``. - """ - return cls(web_entity.entity_id, web_entity.score, - web_entity.description) - - @property - def entity_id(self): - """The entity ID. - - :rtype: str - :returns: String representing the entity ID. Opaque. 
- """ - return self._entity_id - - @property - def score(self): - """Overall relevancy score for the image. - - .. note:: - - Not normalized nor comparable between requests. - - :rtype: float - :returns: Relevancy score as a float. - """ - return self._score - - @property - def description(self): - """Canonical description of the entity, in English. - - :rtype: str - :returns: Description of the entity. - """ - return self._description - - -class WebImage(object): - """Object containing image information elsewhere on the web. - - :type url: str - :param url: URL of the matched image. - - :type score: float - :param score: Overall relevancy score of the image. - """ - def __init__(self, url, score): - self._url = url - self._score = score - - @classmethod - def from_api_repr(cls, web_image): - """Factory: construct ``WebImage`` from Vision API response. - - :type web_image: dict - :param web_image: Dictionary representing a web image - - :rtype: :class:`~google.cloud.vision.web.WebImage` - :returns: Populated instance of ``WebImage``. - """ - return cls(web_image['url'], web_image['score']) - - @classmethod - def from_pb(cls, web_image): - """Factory: construct ``WebImage`` from Vision API response. - - :type web_image: :class:`~google.cloud.proto.vision.v1.\ - web_detection_pb2.WebDetection.WebImage` - :param web_image: Dictionary representing a web image - - :rtype: :class:`~google.cloud.vision.web.WebImage` - :returns: Populated instance of ``WebImage``. - """ - return cls(web_image.url, web_image.score) - - @property - def url(self): - """The URL of the matched image. - - :rtype: str - :returns: URL of matched image. - """ - return self._url - - @property - def score(self): - """Overall relevancy score for the image. - - .. note:: - - Not normalized nor comparable between requests. - - :rtype: float - :returns: Relevancy score as a float. - """ - return self._score - - -class WebPage(object): - """Web page that may contain this image or a similar one. 
- - :type url: str - :param url: URL of the matched image. - - :type score: float - :param score: Overall relevancy score of the image. - """ - def __init__(self, url, score): - self._url = url - self._score = score - - @classmethod - def from_api_repr(cls, web_page): - """Factory: construct ``WebPage`` from Vision API response. - - :type web_page: dict - :param web_page: Dictionary representing a web page - - :rtype: :class:`~google.cloud.vision.web.WebPage` - :returns: Populated instance of ``WebPage``. - """ - return cls(web_page['url'], web_page['score']) - - @classmethod - def from_pb(cls, web_page): - """Factory: construct ``WebPage`` from Vision API response. - - :type web_page: :class:`~google.cloud.proto.vision.v1.\ - web_detection_pb2.WebDetection.WebPage` - :param web_page: Dictionary representing a web image - - :rtype: :class:`~google.cloud.vision.web.WebPage` - :returns: Populated instance of ``WebPage``. - """ - return cls(web_page.url, web_page.score) - - @property - def url(self): - """The page URL. - - :rtype: str - :returns: String representing a URL. - """ - return self._url - - @property - def score(self): - """Overall relevancy score for the image. - - .. note:: - - Not normalized nor comparable between requests. - - :rtype: float - :returns: Relevancy score as a float. - """ - return self._score diff --git a/vision/google/cloud/vision_v1.py b/vision/google/cloud/vision_v1.py new file mode 100644 index 0000000000000..49c077175f2d2 --- /dev/null +++ b/vision/google/cloud/vision_v1.py @@ -0,0 +1,38 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +from google.cloud.gapic.vision.v1 import image_annotator_client as iac +from google.cloud.gapic.vision.v1 import enums + +from google.cloud.proto.vision.v1 import geometry_pb2 as geometry +from google.cloud.proto.vision.v1 import image_annotator_pb2 as image_annotator +from google.cloud.proto.vision.v1 import text_annotation_pb2 as text_annotation +from google.cloud.proto.vision.v1 import web_detection_pb2 as web_detection + +from google.cloud.vision.decorators import add_single_feature_methods +from google.cloud.vision.helpers import VisionHelpers + + +@add_single_feature_methods +class ImageAnnotatorClient(VisionHelpers, iac.ImageAnnotatorClient): + __doc__ = iac.ImageAnnotatorClient.__doc__ + enums = enums + + +__all__ = ( + 'enums', 'geometry', 'image_annotator', 'ImageAnnotatorClient', + 'text_annotation', 'web_detection', +) diff --git a/vision/nox.py b/vision/nox.py index 0008296bdbe37..751e1745b6f64 100644 --- a/vision/nox.py +++ b/vision/nox.py @@ -19,9 +19,6 @@ import nox -LOCAL_DEPS = ('../core/',) - - @nox.session @nox.parametrize('python_version', ['2.7', '3.4', '3.5', '3.6']) def unit_tests(session, python_version): @@ -31,36 +28,31 @@ def unit_tests(session, python_version): session.interpreter = 'python{}'.format(python_version) # Install all test dependencies, then install this package in-place. - session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS) + session.install('mock', 'pytest', 'pytest-cov') session.install('-e', '.') # Run py.test against the unit tests. 
session.run('py.test', '--quiet', - '--cov=google.cloud.vision', '--cov=tests.unit', '--cov-append', - '--cov-config=.coveragerc', '--cov-report=', '--cov-fail-under=97', - 'tests/unit', + '--cov=google.cloud.vision', '--cov=google.cloud.vision_v1', + '--cov-append', '--cov-config=.coveragerc', '--cov-report=', + 'tests/', ) @nox.session @nox.parametrize('python_version', ['2.7', '3.6']) def system_tests(session, python_version): - """Run the system test suite.""" - - # Sanity check: Only run system tests if the environment variable is set. - if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''): - return + """Run the unit test suite.""" - # Run the system tests against latest Python 2 and Python 3 only. + # Run unit tests against all supported versions of Python. session.interpreter = 'python{}'.format(python_version) - # Install all test dependencies, then install this package into the - # virutalenv's dist-packages. - session.install('mock', 'pytest', *LOCAL_DEPS) - session.install('../test_utils/', '../storage/') - session.install('.') + # Install all test dependencies, then install this package in-place. + session.install('pytest', '../core/', '../storage/') + session.install('../test_utils/') + session.install('-e', '.') - # Run py.test against the system tests. + # Run py.test against the unit tests. session.run('py.test', '--quiet', 'tests/system.py') @@ -72,16 +64,16 @@ def lint(session): serious code quality issues. 
""" session.interpreter = 'python3.6' - session.install('flake8', *LOCAL_DEPS) + session.install('flake8') session.install('.') - session.run('flake8', 'google/cloud/vision') + session.run('flake8', 'google/cloud/vision.py') @nox.session def lint_setup_py(session): """Verify that setup.py is valid (including RST check).""" session.interpreter = 'python3.6' - session.install('docutils', 'Pygments') + session.install('docutils', 'pygments') session.run( 'python', 'setup.py', 'check', '--restructuredtext', '--strict') @@ -96,5 +88,5 @@ def cover(session): session.interpreter = 'python3.6' session.chdir(os.path.dirname(__file__)) session.install('coverage', 'pytest-cov') - session.run('coverage', 'report', '--show-missing', '--fail-under=100') + session.run('coverage', 'report', '--show-missing') session.run('coverage', 'erase') diff --git a/vision/requirements.txt b/vision/requirements.txt new file mode 100644 index 0000000000000..e3875cfb5b7d3 --- /dev/null +++ b/vision/requirements.txt @@ -0,0 +1,3 @@ +googleapis-common-protos >= 1.5.2, < 2.0dev +google-gax >= 0.15.11, < 0.16dev +six >= 1.10.0 diff --git a/vision/setup.py b/vision/setup.py index fa060544598c3..48f11f2e8ef2f 100644 --- a/vision/setup.py +++ b/vision/setup.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import io import os from setuptools import find_packages @@ -20,21 +21,38 @@ PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__)) -with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj: - README = file_obj.read() +with io.open(os.path.join(PACKAGE_ROOT, 'README.rst'), 'r') as readme_file: + readme = readme_file.read() -# NOTE: This is duplicated throughout and we should try to -# consolidate. 
-SETUP_BASE = { - 'author': 'Google Cloud Platform', - 'author_email': 'jjg+google-cloud-python@google.com', - 'scripts': [], - 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python', - 'license': 'Apache 2.0', - 'platforms': 'Posix; MacOS X; Windows', - 'include_package_data': True, - 'zip_safe': False, - 'classifiers': [ +cur_dir = os.path.realpath(os.path.dirname(__file__)) +with io.open('%s/requirements.txt' % cur_dir) as requirements_file: + requirements = requirements_file.read().strip().split('\n') + + +setup( + author='Google Cloud Platform', + author_email='googleapis-packages@google.com', + name='google-cloud-vision', + version='0.25.0', + description='Python Client for Google Cloud Vision', + long_description=readme, + namespace_packages=[ + 'google', + 'google.cloud', + 'google.cloud.gapic', + 'google.cloud.gapic.vision', + 'google.cloud.proto', + 'google.cloud.proto.vision', + ], + packages=find_packages(exclude=('tests*',)), + install_requires=requirements, + url='https://github.com/GoogleCloudPlatform/google-cloud-python', + license='Apache 2.0', + platforms='Posix; MacOS X; Windows', + include_package_data=True, + zip_safe=False, + scripts=[], + classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', @@ -47,25 +65,4 @@ 'Programming Language :: Python :: 3.6', 'Topic :: Internet', ], -} - - -REQUIREMENTS = [ - 'enum34', - 'google-cloud-core >= 0.24.0, < 0.25dev', - 'gapic-google-cloud-vision-v1 >= 0.90.3, < 0.91dev', -] - -setup( - name='google-cloud-vision', - version='0.24.0', - description='Python Client for Google Cloud Vision', - long_description=README, - namespace_packages=[ - 'google', - 'google.cloud', - ], - packages=find_packages(exclude=('tests*',)), - install_requires=REQUIREMENTS, - **SETUP_BASE ) diff --git a/vision/tests/gapic/v1/test_image_annotator_client_v1.py b/vision/tests/gapic/v1/test_image_annotator_client_v1.py new file 
mode 100644 index 0000000000000..038a3c725f5a3 --- /dev/null +++ b/vision/tests/gapic/v1/test_image_annotator_client_v1.py @@ -0,0 +1,75 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Unit tests.""" + +import mock +import unittest + +from google.gax import errors + +from google.cloud.gapic.vision.v1 import image_annotator_client +from google.cloud.proto.vision.v1 import image_annotator_pb2 + + +class CustomException(Exception): + pass + + +class TestImageAnnotatorClient(unittest.TestCase): + @mock.patch('google.gax.config.create_stub', spec=True) + def test_batch_annotate_images(self, mock_create_stub): + # Mock gRPC layer + grpc_stub = mock.Mock() + mock_create_stub.return_value = grpc_stub + + client = image_annotator_client.ImageAnnotatorClient() + + # Mock request + requests = [] + + # Mock response + expected_response = image_annotator_pb2.BatchAnnotateImagesResponse() + grpc_stub.BatchAnnotateImages.return_value = expected_response + + response = client.batch_annotate_images(requests) + self.assertEqual(expected_response, response) + + grpc_stub.BatchAnnotateImages.assert_called_once() + args, kwargs = grpc_stub.BatchAnnotateImages.call_args + self.assertEqual(len(args), 2) + self.assertEqual(len(kwargs), 1) + self.assertIn('metadata', kwargs) + actual_request = args[0] + + expected_request = image_annotator_pb2.BatchAnnotateImagesRequest( + requests=requests) + self.assertEqual(expected_request, 
actual_request) + + @mock.patch('google.gax.config.API_ERRORS', (CustomException, )) + @mock.patch('google.gax.config.create_stub', spec=True) + def test_batch_annotate_images_exception(self, mock_create_stub): + # Mock gRPC layer + grpc_stub = mock.Mock() + mock_create_stub.return_value = grpc_stub + + client = image_annotator_client.ImageAnnotatorClient() + + # Mock request + requests = [] + + # Mock exception response + grpc_stub.BatchAnnotateImages.side_effect = CustomException() + + self.assertRaises(errors.GaxError, client.batch_annotate_images, + requests) diff --git a/vision/tests/system.py b/vision/tests/system.py index cddf399ddf5f0..4bbbee912b6b2 100644 --- a/vision/tests/system.py +++ b/vision/tests/system.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google Inc. +# Copyright 2017, Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,9 +22,6 @@ from google.cloud import exceptions from google.cloud import storage from google.cloud import vision -from google.cloud.vision.entity import EntityAnnotation -from google.cloud.vision.feature import Feature -from google.cloud.vision.feature import FeatureTypes from test_utils.retry import RetryErrors from test_utils.system import unique_resource_id @@ -39,706 +36,69 @@ FULL_TEXT_FILE = os.path.join(_SYS_TESTS_DIR, 'data', 'full-text.jpg') -class Config(object): - CLIENT = None - TEST_BUCKET = None +class VisionSystemTestBase(unittest.TestCase): + client = None + test_bucket = None + + def setUp(self): + self.to_delete_by_case = [] + + def tearDown(self): + for value in self.to_delete_by_case: + value.delete() def setUpModule(): - Config.CLIENT = vision.Client() + VisionSystemTestBase.client = vision.ImageAnnotatorClient() storage_client = storage.Client() bucket_name = 'new' + unique_resource_id() - Config.TEST_BUCKET = storage_client.bucket(bucket_name) + VisionSystemTestBase.test_bucket = 
storage_client.bucket(bucket_name) + # 429 Too Many Requests in case API requests rate-limited. retry_429 = RetryErrors(exceptions.TooManyRequests) - retry_429(Config.TEST_BUCKET.create)() + retry_429(VisionSystemTestBase.test_bucket.create)() def tearDownModule(): # 409 Conflict if the bucket is full. # 429 Too Many Requests in case API requests rate-limited. bucket_retry = RetryErrors( - (exceptions.TooManyRequests, exceptions.Conflict)) - bucket_retry(Config.TEST_BUCKET.delete)(force=True) - - -class BaseVisionTestCase(unittest.TestCase): - def _assert_coordinate(self, coordinate): - if coordinate is None: - return - self.assertIsNotNone(coordinate) - self.assertIsInstance(coordinate, (int, float)) - - def _assert_likelihood(self, likelihood): - from google.cloud.vision.likelihood import Likelihood - - levels = [Likelihood.UNKNOWN, Likelihood.VERY_LIKELY, - Likelihood.UNLIKELY, Likelihood.POSSIBLE, Likelihood.LIKELY, - Likelihood.VERY_UNLIKELY] - self.assertIn(likelihood, levels) - - def _pb_not_implemented_skip(self, message): - if Config.CLIENT._use_grpc: - self.skipTest(message) - - -class TestVisionFullText(unittest.TestCase): - def setUp(self): - self.to_delete_by_case = [] - - def tearDown(self): - for value in self.to_delete_by_case: - value.delete() - - def _assert_full_text(self, full_text): - from google.cloud.vision.text import TextAnnotation - - self.assertIsInstance(full_text, TextAnnotation) - self.assertIsInstance(full_text.text, six.text_type) - self.assertEqual(len(full_text.pages), 1) - self.assertIsInstance(full_text.pages[0].width, int) - self.assertIsInstance(full_text.pages[0].height, int) - - def test_detect_full_text_content(self): - client = Config.CLIENT - with open(FULL_TEXT_FILE, 'rb') as image_file: - image = client.image(content=image_file.read()) - full_text = image.detect_full_text(language_hints=['en']) - self._assert_full_text(full_text) - - def test_detect_full_text_filename(self): - client = Config.CLIENT - image = 
client.image(filename=FULL_TEXT_FILE) - full_text = image.detect_full_text(language_hints=['en']) - self._assert_full_text(full_text) - - def test_detect_full_text_gcs(self): - bucket_name = Config.TEST_BUCKET.name - blob_name = 'full-text.jpg' - blob = Config.TEST_BUCKET.blob(blob_name) - self.to_delete_by_case.append(blob) # Clean-up. - with open(FULL_TEXT_FILE, 'rb') as file_obj: - blob.upload_from_file(file_obj) - - source_uri = 'gs://%s/%s' % (bucket_name, blob_name) - - client = Config.CLIENT - image = client.image(source_uri=source_uri) - full_text = image.detect_full_text(language_hints=['en']) - self._assert_full_text(full_text) - - -class TestVisionClientCropHint(BaseVisionTestCase): - def setUp(self): - self.to_delete_by_case = [] - - def tearDown(self): - for value in self.to_delete_by_case: - value.delete() - - def _assert_crop_hint(self, hint): - from google.cloud.vision.crop_hint import CropHint - from google.cloud.vision.geometry import Bounds - - self.assertIsInstance(hint, CropHint) - self.assertIsInstance(hint.bounds, Bounds) - self.assertGreater(len(hint.bounds.vertices), 1) - self.assertIsInstance(hint.confidence, (int, float)) - self.assertIsInstance(hint.importance_fraction, float) - - def test_detect_crop_hints_content(self): - client = Config.CLIENT - with open(FACE_FILE, 'rb') as image_file: - image = client.image(content=image_file.read()) - crop_hints = image.detect_crop_hints( - aspect_ratios=[1.3333, 1.7777], limit=2) - self.assertEqual(len(crop_hints), 2) - for hint in crop_hints: - self._assert_crop_hint(hint) - - def test_detect_crop_hints_filename(self): - client = Config.CLIENT - image = client.image(filename=FACE_FILE) - crop_hints = image.detect_crop_hints( - aspect_ratios=[1.3333, 1.7777], limit=2) - self.assertEqual(len(crop_hints), 2) - for hint in crop_hints: - self._assert_crop_hint(hint) - - def test_detect_crop_hints_gcs(self): - bucket_name = Config.TEST_BUCKET.name - blob_name = 'faces.jpg' - blob = 
Config.TEST_BUCKET.blob(blob_name) - self.to_delete_by_case.append(blob) # Clean-up. - with open(FACE_FILE, 'rb') as file_obj: - blob.upload_from_file(file_obj) - - source_uri = 'gs://%s/%s' % (bucket_name, blob_name) - client = Config.CLIENT - image = client.image(source_uri=source_uri) - crop_hints = image.detect_crop_hints( - aspect_ratios=[1.3333, 1.7777], limit=2) - self.assertEqual(len(crop_hints), 2) - for hint in crop_hints: - self._assert_crop_hint(hint) - - -class TestVisionClientLogo(unittest.TestCase): - def setUp(self): - self.to_delete_by_case = [] - - def tearDown(self): - for value in self.to_delete_by_case: - value.delete() + (exceptions.TooManyRequests, exceptions.Conflict), + ) + bucket_retry(VisionSystemTestBase.test_bucket.delete)(force=True) - def _assert_logo(self, logo): - self.assertIsInstance(logo, EntityAnnotation) - self.assertEqual(logo.description, 'Google') - self.assertEqual(len(logo.bounds.vertices), 4) - self.assertEqual(logo.bounds.vertices[0].x_coordinate, 40) - self.assertEqual(logo.bounds.vertices[0].y_coordinate, 40) - self.assertEqual(logo.bounds.vertices[1].x_coordinate, 959) - self.assertEqual(logo.bounds.vertices[1].y_coordinate, 40) - self.assertEqual(logo.bounds.vertices[2].x_coordinate, 959) - self.assertEqual(logo.bounds.vertices[2].y_coordinate, 302) - self.assertEqual(logo.bounds.vertices[3].x_coordinate, 40) - self.assertEqual(logo.bounds.vertices[3].y_coordinate, 302) - self.assertTrue(logo.score > 0.25) +class TestVisionClientLogo(VisionSystemTestBase): def test_detect_logos_content(self): - client = Config.CLIENT + # Read the file. 
with open(LOGO_FILE, 'rb') as image_file: - image = client.image(content=image_file.read()) - logos = image.detect_logos() - self.assertEqual(len(logos), 1) - logo = logos[0] - self._assert_logo(logo) + content = image_file.read() - def test_detect_logos_filename(self): - client = Config.CLIENT - image = client.image(filename=LOGO_FILE) - logos = image.detect_logos() - self.assertEqual(len(logos), 1) - logo = logos[0] - self._assert_logo(logo) + # Make the request. + response = self.client.logo_detection({ + 'content': content, + }) + + # Check to ensure we got what we expect. + assert len(response.logo_annotations) == 1 + assert response.logo_annotations[0].description == 'Google' def test_detect_logos_gcs(self): - bucket_name = Config.TEST_BUCKET.name + # Upload the image to Google Cloud Storage. blob_name = 'logo.png' - blob = Config.TEST_BUCKET.blob(blob_name) - self.to_delete_by_case.append(blob) # Clean-up. - with open(LOGO_FILE, 'rb') as file_obj: - blob.upload_from_file(file_obj) - - source_uri = 'gs://%s/%s' % (bucket_name, blob_name) - - client = Config.CLIENT - image = client.image(source_uri=source_uri) - logos = image.detect_logos() - self.assertEqual(len(logos), 1) - logo = logos[0] - self._assert_logo(logo) - - -class TestVisionClientFace(BaseVisionTestCase): - def setUp(self): - self.to_delete_by_case = [] - - def tearDown(self): - for value in self.to_delete_by_case: - value.delete() - - def _assert_landmarks(self, landmarks): - from google.cloud.vision.face import Landmark - from google.cloud.vision.face import LandmarkTypes - from google.cloud.vision.face import Position - - for landmark in LandmarkTypes: - if landmark is not LandmarkTypes.UNKNOWN_LANDMARK: - feature = getattr(landmarks, landmark.name.lower()) - self.assertIsInstance(feature, Landmark) - self.assertIsInstance(feature.position, Position) - self._assert_coordinate(feature.position.x_coordinate) - self._assert_coordinate(feature.position.y_coordinate) - 
self._assert_coordinate(feature.position.z_coordinate) - - def _assert_face(self, face): - from google.cloud.vision.face import Bounds - from google.cloud.vision.face import FDBounds - from google.cloud.vision.face import Face - from google.cloud.vision.face import Landmarks - from google.cloud.vision.geometry import Vertex - - self.assertIsInstance(face, Face) - self.assertGreater(face.detection_confidence, 0.0) - self._assert_likelihood(face.anger) - self._assert_likelihood(face.joy) - self._assert_likelihood(face.sorrow) - self._assert_likelihood(face.surprise) - self._assert_likelihood(face.image_properties.blurred) - self._assert_likelihood(face.image_properties.underexposed) - self._assert_likelihood(face.headwear) - self.assertNotEqual(face.angles.roll, 0.0) - self.assertNotEqual(face.angles.pan, 0.0) - self.assertNotEqual(face.angles.tilt, 0.0) - - self.assertIsInstance(face.bounds, Bounds) - for vertex in face.bounds.vertices: - self.assertIsInstance(vertex, Vertex) - self._assert_coordinate(vertex.x_coordinate) - self._assert_coordinate(vertex.y_coordinate) - - self.assertIsInstance(face.fd_bounds, FDBounds) - for vertex in face.fd_bounds.vertices: - self.assertIsInstance(vertex, Vertex) - self._assert_coordinate(vertex.x_coordinate) - self._assert_coordinate(vertex.y_coordinate) - - self.assertIsInstance(face.landmarks, Landmarks) - self._assert_landmarks(face.landmarks) - - def test_detect_faces_content(self): - client = Config.CLIENT - with open(FACE_FILE, 'rb') as image_file: - image = client.image(content=image_file.read()) - faces = image.detect_faces() - self.assertEqual(len(faces), 5) - for face in faces: - self._assert_face(face) - - def test_detect_faces_gcs(self): - bucket_name = Config.TEST_BUCKET.name - blob_name = 'faces.jpg' - blob = Config.TEST_BUCKET.blob(blob_name) - self.to_delete_by_case.append(blob) # Clean-up. 
- with open(FACE_FILE, 'rb') as file_obj: - blob.upload_from_file(file_obj) - - source_uri = 'gs://%s/%s' % (bucket_name, blob_name) - client = Config.CLIENT - image = client.image(source_uri=source_uri) - faces = image.detect_faces() - self.assertEqual(len(faces), 5) - for face in faces: - self._assert_face(face) - - def test_detect_faces_filename(self): - client = Config.CLIENT - image = client.image(filename=FACE_FILE) - faces = image.detect_faces() - self.assertEqual(len(faces), 5) - for face in faces: - self._assert_face(face) - - -class TestVisionClientLabel(BaseVisionTestCase): - DESCRIPTIONS = ( - 'car', - 'vehicle', - 'land vehicle', - 'automotive design', - 'wheel', - 'automobile make', - 'luxury vehicle', - 'sports car', - 'performance car', - 'automotive exterior', - ) - - def setUp(self): - self.to_delete_by_case = [] - - def tearDown(self): - for value in self.to_delete_by_case: - value.delete() - - def _assert_label(self, label): - self.assertIsInstance(label, EntityAnnotation) - self.assertIn(label.description, self.DESCRIPTIONS) - self.assertIsInstance(label.mid, six.text_type) - self.assertGreater(label.score, 0.0) - - def test_detect_labels_content(self): - client = Config.CLIENT - with open(LABEL_FILE, 'rb') as image_file: - image = client.image(content=image_file.read()) - labels = image.detect_labels() - self.assertEqual(len(labels), 10) - for label in labels: - self._assert_label(label) - - def test_detect_labels_gcs(self): - bucket_name = Config.TEST_BUCKET.name - blob_name = 'car.jpg' - blob = Config.TEST_BUCKET.blob(blob_name) - self.to_delete_by_case.append(blob) # Clean-up. 
- with open(LABEL_FILE, 'rb') as file_obj: - blob.upload_from_file(file_obj) - - source_uri = 'gs://%s/%s' % (bucket_name, blob_name) - - client = Config.CLIENT - image = client.image(source_uri=source_uri) - labels = image.detect_labels() - self.assertEqual(len(labels), 10) - for label in labels: - self._assert_label(label) - - def test_detect_labels_filename(self): - client = Config.CLIENT - image = client.image(filename=LABEL_FILE) - labels = image.detect_labels() - self.assertEqual(len(labels), 10) - for label in labels: - self._assert_label(label) - - -class TestVisionClientLandmark(BaseVisionTestCase): - DESCRIPTIONS = ('Mount Rushmore',) - - def setUp(self): - self.to_delete_by_case = [] - - def tearDown(self): - for value in self.to_delete_by_case: - value.delete() - - def _assert_landmark(self, landmark): - self.assertIsInstance(landmark, EntityAnnotation) - self.assertIn(landmark.description, self.DESCRIPTIONS) - self.assertEqual(len(landmark.locations), 1) - location = landmark.locations[0] - self._assert_coordinate(location.latitude) - self._assert_coordinate(location.longitude) - for vertex in landmark.bounds.vertices: - self._assert_coordinate(vertex.x_coordinate) - self._assert_coordinate(vertex.y_coordinate) - self.assertGreater(landmark.score, 0.2) - self.assertIsInstance(landmark.mid, six.text_type) - - def test_detect_landmark_content(self): - client = Config.CLIENT - with open(LANDMARK_FILE, 'rb') as image_file: - image = client.image(content=image_file.read()) - landmarks = image.detect_landmarks() - self.assertEqual(len(landmarks), 1) - landmark = landmarks[0] - self._assert_landmark(landmark) - - def test_detect_landmark_gcs(self): - bucket_name = Config.TEST_BUCKET.name - blob_name = 'landmark.jpg' - blob = Config.TEST_BUCKET.blob(blob_name) - self.to_delete_by_case.append(blob) # Clean-up. 
- with open(LANDMARK_FILE, 'rb') as file_obj: - blob.upload_from_file(file_obj) - - source_uri = 'gs://%s/%s' % (bucket_name, blob_name) - - client = Config.CLIENT - image = client.image(source_uri=source_uri) - landmarks = image.detect_landmarks() - self.assertEqual(len(landmarks), 1) - landmark = landmarks[0] - self._assert_landmark(landmark) - - def test_detect_landmark_filename(self): - client = Config.CLIENT - image = client.image(filename=LANDMARK_FILE) - landmarks = image.detect_landmarks() - self.assertEqual(len(landmarks), 1) - landmark = landmarks[0] - self._assert_landmark(landmark) - - -class TestVisionClientSafeSearch(BaseVisionTestCase): - def setUp(self): - self.to_delete_by_case = [] - - def tearDown(self): - for value in self.to_delete_by_case: - value.delete() - - def _assert_safe_search(self, safe_search): - from google.cloud.vision.safe_search import SafeSearchAnnotation - - self.assertIsInstance(safe_search, SafeSearchAnnotation) - self._assert_likelihood(safe_search.adult) - self._assert_likelihood(safe_search.spoof) - self._assert_likelihood(safe_search.medical) - self._assert_likelihood(safe_search.violence) - - def test_detect_safe_search_content(self): - client = Config.CLIENT - with open(FACE_FILE, 'rb') as image_file: - image = client.image(content=image_file.read()) - safe_search = image.detect_safe_search() - self._assert_safe_search(safe_search) - - def test_detect_safe_search_gcs(self): - bucket_name = Config.TEST_BUCKET.name - blob_name = 'faces.jpg' - blob = Config.TEST_BUCKET.blob(blob_name) - self.to_delete_by_case.append(blob) # Clean-up. 
- with open(FACE_FILE, 'rb') as file_obj: - blob.upload_from_file(file_obj) - - source_uri = 'gs://%s/%s' % (bucket_name, blob_name) - - client = Config.CLIENT - image = client.image(source_uri=source_uri) - safe_search = image.detect_safe_search() - self._assert_safe_search(safe_search) - - def test_detect_safe_search_filename(self): - client = Config.CLIENT - image = client.image(filename=FACE_FILE) - safe_search = image.detect_safe_search() - self._assert_safe_search(safe_search) - - -class TestVisionClientText(unittest.TestCase): - DESCRIPTIONS = ( - 'Do', - 'what', - 'is', - 'right,', - 'not', - 'what', - 'is', - 'easy', - 'Do what is\nright, not\nwhat is easy\n', - ) - - def setUp(self): - self.to_delete_by_case = [] - - def tearDown(self): - for value in self.to_delete_by_case: - value.delete() - - def _assert_text(self, text): - self.assertIsInstance(text, EntityAnnotation) - self.assertIn(text.description, self.DESCRIPTIONS) - self.assertIn(text.locale, (None, '', 'en')) - self.assertIsInstance(text.score, (type(None), float)) - - def test_detect_text_content(self): - client = Config.CLIENT - with open(TEXT_FILE, 'rb') as image_file: - image = client.image(content=image_file.read()) - texts = image.detect_text() - self.assertEqual(len(texts), 9) - for text in texts: - self._assert_text(text) - - def test_detect_text_gcs(self): - bucket_name = Config.TEST_BUCKET.name - blob_name = 'text.jpg' - blob = Config.TEST_BUCKET.blob(blob_name) - self.to_delete_by_case.append(blob) # Clean-up. 
- with open(TEXT_FILE, 'rb') as file_obj: - blob.upload_from_file(file_obj) - - source_uri = 'gs://%s/%s' % (bucket_name, blob_name) - - client = Config.CLIENT - image = client.image(source_uri=source_uri) - texts = image.detect_text() - self.assertEqual(len(texts), 9) - for text in texts: - self._assert_text(text) - - def test_detect_text_filename(self): - client = Config.CLIENT - image = client.image(filename=TEXT_FILE) - texts = image.detect_text() - self.assertEqual(len(texts), 9) - for text in texts: - self._assert_text(text) - - -class TestVisionClientImageProperties(BaseVisionTestCase): - def setUp(self): - self.to_delete_by_case = [] - - def tearDown(self): - for value in self.to_delete_by_case: - value.delete() - - def _assert_color(self, color): - self.assertIsInstance(color.red, float) - self.assertIsInstance(color.green, float) - self.assertIsInstance(color.blue, float) - self.assertIsInstance(color.alpha, float) - self.assertNotEqual(color.red, 0.0) - self.assertNotEqual(color.green, 0.0) - self.assertNotEqual(color.blue, 0.0) - - def _assert_properties(self, image_property): - from google.cloud.vision.color import ImagePropertiesAnnotation - - self.assertIsInstance(image_property, ImagePropertiesAnnotation) - results = image_property.colors - for color_info in results: - self._assert_color(color_info.color) - self.assertNotEqual(color_info.pixel_fraction, 0.0) - self.assertNotEqual(color_info.score, 0.0) - - def test_detect_properties_content(self): - client = Config.CLIENT - with open(FACE_FILE, 'rb') as image_file: - image = client.image(content=image_file.read()) - properties = image.detect_properties() - self._assert_properties(properties) - - def test_detect_properties_gcs(self): - client = Config.CLIENT - bucket_name = Config.TEST_BUCKET.name - blob_name = 'faces.jpg' - blob = Config.TEST_BUCKET.blob(blob_name) - self.to_delete_by_case.append(blob) # Clean-up. 
- with open(FACE_FILE, 'rb') as file_obj: - blob.upload_from_file(file_obj) - - source_uri = 'gs://%s/%s' % (bucket_name, blob_name) - - image = client.image(source_uri=source_uri) - properties = image.detect_properties() - self._assert_properties(properties) - - def test_detect_properties_filename(self): - client = Config.CLIENT - image = client.image(filename=FACE_FILE) - properties = image.detect_properties() - self._assert_properties(properties) - - -class TestVisionBatchProcessing(BaseVisionTestCase): - def setUp(self): - self.to_delete_by_case = [] - - def tearDown(self): - for value in self.to_delete_by_case: - value.delete() - - def test_batch_detect_gcs(self): - client = Config.CLIENT - bucket_name = Config.TEST_BUCKET.name - - # Logo GCS image. - blob_name = 'logos.jpg' - blob = Config.TEST_BUCKET.blob(blob_name) - self.to_delete_by_case.append(blob) # Clean-up. - with open(LOGO_FILE, 'rb') as file_obj: - blob.upload_from_file(file_obj) - - logo_source_uri = 'gs://%s/%s' % (bucket_name, blob_name) - - image_one = client.image(source_uri=logo_source_uri) - logo_feature = Feature(FeatureTypes.LOGO_DETECTION, 2) - - # Faces GCS image. - blob_name = 'faces.jpg' - blob = Config.TEST_BUCKET.blob(blob_name) - self.to_delete_by_case.append(blob) # Clean-up. 
- with open(FACE_FILE, 'rb') as file_obj: - blob.upload_from_file(file_obj) - - face_source_uri = 'gs://%s/%s' % (bucket_name, blob_name) - - image_two = client.image(source_uri=face_source_uri) - face_feature = Feature(FeatureTypes.FACE_DETECTION, 2) - - batch = client.batch() - batch.add_image(image_one, [logo_feature]) - batch.add_image(image_two, [face_feature, logo_feature]) - results = batch.detect() - self.assertEqual(len(results), 2) - self.assertIsInstance(results[0], vision.annotations.Annotations) - self.assertIsInstance(results[1], vision.annotations.Annotations) - self.assertEqual(len(results[0].logos), 1) - self.assertEqual(len(results[0].faces), 0) - - self.assertEqual(len(results[1].logos), 0) - self.assertEqual(len(results[1].faces), 2) - - -class TestVisionWebAnnotation(BaseVisionTestCase): - def setUp(self): - self.to_delete_by_case = [] - - def tearDown(self): - for value in self.to_delete_by_case: - value.delete() - - def _assert_web_entity(self, web_entity): - from google.cloud.vision.web import WebEntity - - self.assertIsInstance(web_entity, WebEntity) - self.assertIsInstance(web_entity.entity_id, six.text_type) - self.assertIsInstance(web_entity.score, float) - self.assertIsInstance(web_entity.description, six.text_type) - - def _assert_web_image(self, web_image): - from google.cloud.vision.web import WebImage - - self.assertIsInstance(web_image, WebImage) - self.assertIsInstance(web_image.url, six.text_type) - self.assertIsInstance(web_image.score, float) - - def _assert_web_page(self, web_page): - from google.cloud.vision.web import WebPage - - self.assertIsInstance(web_page, WebPage) - self.assertIsInstance(web_page.url, six.text_type) - self.assertIsInstance(web_page.score, float) - - def _assert_web_images(self, web_images, limit): - self.assertEqual(len(web_images.web_entities), limit) - for web_entity in web_images.web_entities: - self._assert_web_entity(web_entity) - - self.assertEqual(len(web_images.full_matching_images), limit) - 
for web_image in web_images.full_matching_images: - self._assert_web_image(web_image) - - self.assertEqual(len(web_images.partial_matching_images), limit) - for web_image in web_images.partial_matching_images: - self._assert_web_image(web_image) - - self.assertEqual(len(web_images.pages_with_matching_images), limit) - for web_page in web_images.pages_with_matching_images: - self._assert_web_page(web_page) - - @RetryErrors(unittest.TestCase.failureException) - def test_detect_web_images_from_content(self): - client = Config.CLIENT - with open(LANDMARK_FILE, 'rb') as image_file: - image = client.image(content=image_file.read()) - limit = 3 - web_images = image.detect_web(limit=limit) - self._assert_web_images(web_images, limit) - - def test_detect_web_images_from_gcs(self): - client = Config.CLIENT - bucket_name = Config.TEST_BUCKET.name - blob_name = 'landmark.jpg' - blob = Config.TEST_BUCKET.blob(blob_name) - self.to_delete_by_case.append(blob) # Clean-up. - with open(LANDMARK_FILE, 'rb') as file_obj: - blob.upload_from_file(file_obj) - - source_uri = 'gs://%s/%s' % (bucket_name, blob_name) - - image = client.image(source_uri=source_uri) - limit = 5 - web_images = image.detect_web(limit=limit) - self._assert_web_images(web_images, limit) - - def test_detect_web_images_from_filename(self): - client = Config.CLIENT - image = client.image(filename=LANDMARK_FILE) - limit = 5 - web_images = image.detect_web(limit=limit) - self._assert_web_images(web_images, limit) + blob = self.test_bucket.blob(blob_name) + self.to_delete_by_case.append(blob) + with open(LOGO_FILE, 'rb') as image_file: + blob.upload_from_file(image_file) + + # Make the request. + response = self.client.logo_detection({ + 'source': {'image_uri': 'gs://{bucket}/{blob}'.format( + bucket=self.test_bucket.name, + blob=blob_name, + )}, + }) + + # Check the response. 
+ assert len(response.logo_annotations) == 1 + assert response.logo_annotations[0].description == 'Google' diff --git a/vision/tests/unit/__init__.py b/vision/tests/unit/__init__.py deleted file mode 100644 index 58e0d91536321..0000000000000 --- a/vision/tests/unit/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/vision/tests/unit/_fixtures.py b/vision/tests/unit/_fixtures.py deleted file mode 100644 index 6075fb2eea319..0000000000000 --- a/vision/tests/unit/_fixtures.py +++ /dev/null @@ -1,2001 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the 'License'); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an 'AS IS' BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -FULL_TEXT_RESPONSE = { - 'responses': [{ - 'fullTextAnnotation': { - 'pages': [{ - 'height': 1872, - 'property': { - 'detectedLanguages': [{ - 'languageCode': 'en' - }] - }, - 'blocks': [{ - 'blockType': 'TEXT', - 'property': { - 'detectedLanguages': [{ - 'languageCode': 'en' - }] - }, - 'boundingBox': { - 'vertices': [{ - 'y': 8, - 'x': 344 - }, { - 'y': 8, - 'x': 434 - }, { - 'y': 22, - 'x': 434 - }, { - 'y': 22, - 'x': 344 - }] - }, - 'paragraphs': [{ - 'property': { - 'detectedLanguages': [{ - 'languageCode': 'en' - }] - }, - 'words': [{ - 'symbols': [{ - 'property': { - 'detectedLanguages': [{ - 'languageCode': 'en' - }] - }, - 'text': 'T', - 'boundingBox': { - 'vertices': [{ - 'y': 8, - 'x': 344 - }, { - 'y': 8, - 'x': 352 - }, { - 'y': 22, - 'x': 352 - }, { - 'y': 22, - 'x': 344 - }] - } - }], - 'property': { - 'detectedLanguages': [{ - 'languageCode': 'en' - }] - }, - 'boundingBox': { - 'vertices': [{ - 'y': 8, - 'x': 377 - }, { - 'y': 8, - 'x': 434 - }, { - 'y': 22, - 'x': 434 - }, { - 'y': 22, - 'x': 377 - }] - } - }], - 'boundingBox': { - 'vertices': [{ - 'y': 8, - 'x': 344 - }, { - 'y': 8, - 'x': 434 - }, { - 'y': 22, - 'x': 434 - }, { - 'y': 22, - 'x': 344 - }] - } - }] - }], - 'width': 792 - }], - 'text': 'The Republic\nBy Plato' - } - }] -} - -CROP_HINTS_RESPONSE = { - "responses": [{ - "cropHintsAnnotation": { - "cropHints": [{ - "importanceFraction": 1.22, - "boundingPoly": { - "vertices": [{ - "x": 77 - }, { - "x": 1821 - }, { - "x": 1821, - "y": 1306 - }, { - "x": 77, - "y": 1306 - }] - }, - "confidence": 0.5 - }, { - "importanceFraction": 1.2099999, - "boundingPoly": { - "vertices": [{}, { - "x": 1959 - }, { - "x": 1959, - "y": 1096 - }, { - "y": 1096 - }] - }, - "confidence": 0.29999998 - }] - } - }] -} - - -IMAGE_PROPERTIES_RESPONSE = { - 'responses': [ - { - 'imagePropertiesAnnotation': { - 'dominantColors': { - 'colors': [ - { - 'color': { - 'red': 253, - 'green': 203, - 'blue': 65, - 'alpha': 0.0 - }, - 'score': 0.42258179, - 
'pixelFraction': 0.025376344 - }, - { - 'color': { - 'red': 216, - 'green': 69, - 'blue': 56 - }, - 'score': 0.34945792, - 'pixelFraction': 0.026093191 - }, - { - 'color': { - 'red': 79, - 'green': 142, - 'blue': 245 - }, - 'score': 0.050921876, - 'pixelFraction': 0.014193549 - }, - { - 'color': { - 'red': 249, - 'green': 246, - 'blue': 246 - }, - 'score': 0.0059412993, - 'pixelFraction': 0.86896056 - }, - { - 'color': { - 'red': 222, - 'green': 119, - 'blue': 51 - }, - 'score': 0.0043299114, - 'pixelFraction': 0.00021505376 - }, - { - 'color': { - 'red': 226, - 'green': 138, - 'blue': 130 - }, - 'score': 0.0038594988, - 'pixelFraction': 0.00086021505 - }, - { - 'color': { - 'red': 165, - 'green': 194, - 'blue': 243 - }, - 'score': 0.0029492097, - 'pixelFraction': 0.0015053763 - }, - { - 'color': { - 'red': 231, - 'green': 169, - 'blue': 164 - }, - 'score': 0.0017002203, - 'pixelFraction': 0.00043010752 - }, - { - 'color': { - 'red': 137, - 'green': 98, - 'blue': 142 - }, - 'score': 0.0013974205, - 'pixelFraction': 0.00071684585 - }, - { - 'color': { - 'red': 239, - 'green': 179, - 'blue': 56 - }, - 'score': 0.050473157, - 'pixelFraction': 0.0022222223 - } - ] - } - } - } - ] -} - -LABEL_DETECTION_RESPONSE = { - 'responses': [ - { - 'labelAnnotations': [ - { - 'mid': '/m/0k4j', - 'description': 'automobile', - 'score': 0.9776855 - }, - { - 'mid': '/m/07yv9', - 'description': 'vehicle', - 'score': 0.947987 - }, - { - 'mid': '/m/07r04', - 'description': 'truck', - 'score': 0.88429511 - } - ] - } - ] -} - - -LANDMARK_DETECTION_RESPONSE = { - 'responses': [ - { - 'landmarkAnnotations': [ - { - 'mid': '/m/04gdr', - 'description': 'Louvre', - 'score': 0.67257267, - 'boundingPoly': { - 'vertices': [ - { - 'x': 1075, - 'y': 49 - }, - { - 'x': 1494, - 'y': 49 - }, - { - 'x': 1494, - 'y': 307 - }, - { - 'x': 1075, - 'y': 307 - } - ] - }, - 'locations': [ - { - 'latLng': { - 'latitude': 48.861013, - 'longitude': 2.335818 - } - } - ] - }, - { - 'mid': '/m/094llg', - 
'description': 'Louvre Pyramid', - 'score': 0.53734678, - 'boundingPoly': { - 'vertices': [ - { - 'x': 227, - 'y': 274 - }, - { - 'x': 1471, - 'y': 274 - }, - { - 'x': 1471, - 'y': 624 - }, - { - 'x': 227, - 'y': 624 - } - ] - }, - 'locations': [ - { - 'latLng': { - 'latitude': 48.860749, - 'longitude': 2.336312 - } - } - ] - } - ] - } - ] -} - -LOGO_DETECTION_RESPONSE = { - 'responses': [ - { - 'logoAnnotations': [ - { - 'mid': '/m/05b5c', - 'description': 'Brand1', - 'score': 0.63192177, - 'boundingPoly': { - 'vertices': [ - { - 'x': 78, - 'y': 162 - }, - { - 'x': 282, - 'y': 162 - }, - { - 'x': 282, - 'y': 211 - }, - { - 'x': 78, - 'y': 211 - } - ] - } - }, - { - 'mid': '/m/0fpzzp', - 'description': 'Brand2', - 'score': 0.5492993, - 'boundingPoly': { - 'vertices': [ - { - 'x': 310, - 'y': 209 - }, - { - 'x': 477, - 'y': 209 - }, - { - 'x': 477, - 'y': 282 - }, - { - 'x': 310, - 'y': 282 - } - ] - } - } - ] - } - ] -} - -FACE_DETECTION_RESPONSE = { - 'responses': [{ - 'faceAnnotations': [{ - 'headwearLikelihood': 'VERY_UNLIKELY', - 'panAngle': 6.027647, - 'underExposedLikelihood': 'VERY_UNLIKELY', - 'landmarkingConfidence': 0.54453093, - 'detectionConfidence': 0.9863683, - 'joyLikelihood': 'VERY_LIKELY', - 'landmarks': [{ - 'position': { - 'y': 482.69385, - 'x': 1004.8003, - 'z': 0.0016593217 - }, - 'type': 'LEFT_EYE' - }, { - 'position': { - 'y': 470.90149, - 'x': 1218.9751, - 'z': 20.597967 - }, - 'type': 'RIGHT_EYE' - }, { - 'position': { - 'y': 441.46521, - 'x': 934.25629, - 'z': -1.1400928 - }, - 'type': 'LEFT_OF_LEFT_EYEBROW' - }, { - 'position': { - 'y': 449.2872, - 'x': 1059.306, - 'z': -47.195843 - }, - 'type': 'RIGHT_OF_LEFT_EYEBROW' - }, { - 'position': { - 'y': 446.05408, - 'x': 1163.678, - 'z': -37.211197 - }, - 'type': 'LEFT_OF_RIGHT_EYEBROW' - }, { - 'position': { - 'y': 424.18341, - 'x': 1285.0209, - 'z': 34.844131 - }, - 'type': 'RIGHT_OF_RIGHT_EYEBROW' - }, { - 'position': { - 'y': 485.18387, - 'x': 1113.4325, - 'z': -32.579361 - }, - 'type': 
'MIDPOINT_BETWEEN_EYES' - }, { - 'position': { - 'y': 620.27032, - 'x': 1122.5671, - 'z': -51.019524 - }, - 'type': 'NOSE_TIP' - }, { - 'position': { - 'y': 674.32526, - 'x': 1117.0417, - 'z': 17.330631 - }, - 'type': 'UPPER_LIP' - }, { - 'position': { - 'y': 737.29736, - 'x': 1115.7112, - 'z': 54.076469 - }, - 'type': 'LOWER_LIP' - }, { - 'position': { - 'y': 680.62927, - 'x': 1017.0475, - 'z': 72.948006 - }, - 'type': 'MOUTH_LEFT' - }, { - 'position': { - 'y': 681.53552, - 'x': 1191.5186, - 'z': 87.198334 - }, - 'type': 'MOUTH_RIGHT' - }, { - 'position': { - 'y': 702.3808, - 'x': 1115.4193, - 'z': 42.56889 - }, - 'type': 'MOUTH_CENTER' - }, { - 'position': { - 'y': 606.68555, - 'x': 1169.0006, - 'z': 33.98217 - }, - 'type': 'NOSE_BOTTOM_RIGHT' - }, { - 'position': { - 'y': 612.71509, - 'x': 1053.9476, - 'z': 23.409685 - }, - 'type': 'NOSE_BOTTOM_LEFT' - }, { - 'position': { - 'y': 634.95532, - 'x': 1116.6818, - 'z': 3.386874 - }, - 'type': 'NOSE_BOTTOM_CENTER' - }, { - 'position': { - 'y': 476.70197, - 'x': 1009.2689, - 'z': -16.84004 - }, - 'type': 'LEFT_EYE_TOP_BOUNDARY' - }, { - 'position': { - 'y': 491.64874, - 'x': 1049.3926, - 'z': 7.0493474 - }, - 'type': 'LEFT_EYE_RIGHT_CORNER' - }, { - 'position': { - 'y': 499.426, - 'x': 1003.9925, - 'z': 3.5417991 - }, - 'type': 'LEFT_EYE_BOTTOM_BOUNDARY' - }, { - 'position': { - 'y': 482.37302, - 'x': 964.48242, - 'z': 14.96223 - }, - 'type': 'LEFT_EYE_LEFT_CORNER' - }, { - 'position': { - 'y': 487.90195, - 'x': 1005.3607, - 'z': -4.7375555 - }, - 'type': 'LEFT_EYE_PUPIL' - }, { - 'position': { - 'y': 468.33276, - 'x': 1212.7329, - 'z': 3.5585577 - }, - 'type': 'RIGHT_EYE_TOP_BOUNDARY' - }, { - 'position': { - 'y': 470.92487, - 'x': 1251.7043, - 'z': 43.794273 - }, - 'type': 'RIGHT_EYE_RIGHT_CORNER' - }, { - 'position': { - 'y': 486.98676, - 'x': 1217.4629, - 'z': 23.580008 - }, - 'type': 'RIGHT_EYE_BOTTOM_BOUNDARY' - }, { - 'position': { - 'y': 482.41071, - 'x': 1173.4624, - 'z': 18.852427 - }, - 'type': 
'RIGHT_EYE_LEFT_CORNER' - }, { - 'position': { - 'y': 479.32739, - 'x': 1213.9757, - 'z': 16.041821 - }, - 'type': 'RIGHT_EYE_PUPIL' - }, { - 'position': { - 'y': 424.38797, - 'x': 1001.2206, - 'z': -46.463905 - }, - 'type': 'LEFT_EYEBROW_UPPER_MIDPOINT' - }, { - 'position': { - 'y': 415.33655, - 'x': 1221.9457, - 'z': -24.29454 - }, - 'type': 'RIGHT_EYEBROW_UPPER_MIDPOINT' - }, { - 'position': { - 'y': 506.88251, - 'x': 851.96124, - 'z': 257.9054 - }, - 'type': 'LEFT_EAR_TRAGION' - }, { - 'position': { - 'y': 487.9679, - 'x': 1313.8328, - 'z': 304.29816 - }, - 'type': 'RIGHT_EAR_TRAGION' - }, { - 'position': { - 'y': 447.98254, - 'x': 1114.1573, - 'z': -50.620598 - }, - 'type': 'FOREHEAD_GLABELLA' - }, { - 'position': { - 'y': 815.3302, - 'x': 1113.27, - 'z': 109.69422 - }, - 'type': 'CHIN_GNATHION' - }, { - 'position': { - 'y': 656.20123, - 'x': 884.34106, - 'z': 223.19124 - }, - 'type': 'CHIN_LEFT_GONION' - }, { - 'position': { - 'y': 639.291, - 'x': 1301.2404, - 'z': 265.00647 - }, - 'type': 'CHIN_RIGHT_GONION' - }], - 'sorrowLikelihood': 'VERY_UNLIKELY', - 'surpriseLikelihood': 'VERY_UNLIKELY', - 'tiltAngle': -18.412321, - 'angerLikelihood': 'VERY_UNLIKELY', - 'boundingPoly': { - 'vertices': [{ - 'y': 58, - 'x': 748 - }, { - 'y': 58, - 'x': 1430 - }, { - 'y': 851, - 'x': 1430 - }, { - 'y': 851, - 'x': 748 - }] - }, - 'rollAngle': -0.43419784, - 'blurredLikelihood': 'VERY_UNLIKELY', - 'fdBoundingPoly': { - 'vertices': [{ - 'y': 310, - 'x': 845 - }, { - 'y': 310, - 'x': 1379 - }, { - 'y': 844, - 'x': 1379 - }, { - 'y': 844, - 'x': 845 - }] - } - }, { - 'headwearLikelihood': 'VERY_UNLIKELY', - 'panAngle': -12.609346, - 'underExposedLikelihood': 'VERY_UNLIKELY', - 'landmarkingConfidence': 0.56890666, - 'detectionConfidence': 0.96333671, - 'joyLikelihood': 'VERY_LIKELY', - 'landmarks': [{ - 'position': { - 'y': 604.24847, - 'x': 1655.8817, - 'z': -0.0023633335 - }, - 'type': 'LEFT_EYE' - }, { - 'position': { - 'y': 590.82428, - 'x': 1797.3677, - 'z': -30.984835 - 
}, - 'type': 'RIGHT_EYE' - }, { - 'position': { - 'y': 574.40173, - 'x': 1609.9617, - 'z': 14.634346 - }, - 'type': 'LEFT_OF_LEFT_EYEBROW' - }, { - 'position': { - 'y': 576.57483, - 'x': 1682.0824, - 'z': -41.733879 - }, - 'type': 'RIGHT_OF_LEFT_EYEBROW' - }, { - 'position': { - 'y': 571.701, - 'x': 1749.7633, - 'z': -56.105503 - }, - 'type': 'LEFT_OF_RIGHT_EYEBROW' - }, { - 'position': { - 'y': 556.67511, - 'x': 1837.4333, - 'z': -35.228374 - }, - 'type': 'RIGHT_OF_RIGHT_EYEBROW' - }, { - 'position': { - 'y': 600.41345, - 'x': 1720.1719, - 'z': -44.4393 - }, - 'type': 'MIDPOINT_BETWEEN_EYES' - }, { - 'position': { - 'y': 691.66907, - 'x': 1720.0095, - 'z': -63.878113 - }, - 'type': 'NOSE_TIP' - }, { - 'position': { - 'y': 731.63239, - 'x': 1733.2758, - 'z': -20.964622 - }, - 'type': 'UPPER_LIP' - }, { - 'position': { - 'y': 774.79138, - 'x': 1740.1494, - 'z': -0.038273316 - }, - 'type': 'LOWER_LIP' - }, { - 'position': { - 'y': 739.80981, - 'x': 1673.0156, - 'z': 35.655769 - }, - 'type': 'MOUTH_LEFT' - }, { - 'position': { - 'y': 728.8186, - 'x': 1808.8899, - 'z': 9.5512733 - }, - 'type': 'MOUTH_RIGHT' - }, { - 'position': { - 'y': 753.71118, - 'x': 1738.0863, - 'z': -5.2711153 - }, - 'type': 'MOUTH_CENTER' - }, { - 'position': { - 'y': 684.97522, - 'x': 1770.2415, - 'z': -18.243216 - }, - 'type': 'NOSE_BOTTOM_RIGHT' - }, { - 'position': { - 'y': 695.69922, - 'x': 1693.4669, - 'z': -0.6566487 - }, - 'type': 'NOSE_BOTTOM_LEFT' - }, { - 'position': { - 'y': 704.46063, - 'x': 1729.86, - 'z': -28.144602 - }, - 'type': 'NOSE_BOTTOM_CENTER' - }, { - 'position': { - 'y': 597.93713, - 'x': 1654.082, - 'z': -11.508363 - }, - 'type': 'LEFT_EYE_TOP_BOUNDARY' - }, { - 'position': { - 'y': 605.889, - 'x': 1684.0094, - 'z': -5.0379925 - }, - 'type': 'LEFT_EYE_RIGHT_CORNER' - }, { - 'position': { - 'y': 614.40448, - 'x': 1656.4753, - 'z': 1.001922 - }, - 'type': 'LEFT_EYE_BOTTOM_BOUNDARY' - }, { - 'position': { - 'y': 604.11292, - 'x': 1632.2733, - 'z': 18.163708 - }, - 'type': 
'LEFT_EYE_LEFT_CORNER' - }, { - 'position': { - 'y': 606.02026, - 'x': 1654.1372, - 'z': -3.3510325 - }, - 'type': 'LEFT_EYE_PUPIL' - }, { - 'position': { - 'y': 588.00885, - 'x': 1790.3329, - 'z': -41.150127 - }, - 'type': 'RIGHT_EYE_TOP_BOUNDARY' - }, { - 'position': { - 'y': 590.46307, - 'x': 1824.5522, - 'z': -23.20849 - }, - 'type': 'RIGHT_EYE_RIGHT_CORNER' - }, { - 'position': { - 'y': 601.75946, - 'x': 1797.9852, - 'z': -29.095766 - }, - 'type': 'RIGHT_EYE_BOTTOM_BOUNDARY' - }, { - 'position': { - 'y': 598.66449, - 'x': 1768.7595, - 'z': -23.117319 - }, - 'type': 'RIGHT_EYE_LEFT_CORNER' - }, { - 'position': { - 'y': 595.84918, - 'x': 1794.0195, - 'z': -33.897068 - }, - 'type': 'RIGHT_EYE_PUPIL' - }, { - 'position': { - 'y': 561.08679, - 'x': 1641.9266, - 'z': -26.653444 - }, - 'type': 'LEFT_EYEBROW_UPPER_MIDPOINT' - }, { - 'position': { - 'y': 550.38129, - 'x': 1789.6267, - 'z': -58.874447 - }, - 'type': 'RIGHT_EYEBROW_UPPER_MIDPOINT' - }, { - 'position': { - 'y': 632.54456, - 'x': 1611.1659, - 'z': 198.83691 - }, - 'type': 'LEFT_EAR_TRAGION' - }, { - 'position': { - 'y': 610.1615, - 'x': 1920.511, - 'z': 131.28908 - }, - 'type': 'RIGHT_EAR_TRAGION' - }, { - 'position': { - 'y': 574.28448, - 'x': 1714.6324, - 'z': -54.497036 - }, - 'type': 'FOREHEAD_GLABELLA' - }, { - 'position': { - 'y': 830.93884, - 'x': 1752.2703, - 'z': 33.332912 - }, - 'type': 'CHIN_GNATHION' - }, { - 'position': { - 'y': 732.33936, - 'x': 1626.519, - 'z': 162.6319 - }, - 'type': 'CHIN_LEFT_GONION' - }, { - 'position': { - 'y': 712.21118, - 'x': 1905.7007, - 'z': 101.86344 - }, - 'type': 'CHIN_RIGHT_GONION' - }], - 'sorrowLikelihood': 'VERY_UNLIKELY', - 'surpriseLikelihood': 'VERY_UNLIKELY', - 'tiltAngle': -13.636207, - 'angerLikelihood': 'VERY_UNLIKELY', - 'boundingPoly': { - 'vertices': [{ - 'y': 319, - 'x': 1524 - }, { - 'y': 319, - 'x': 1959 - }, { - 'y': 859, - 'x': 1959 - }, { - 'y': 859, - 'x': 1524 - }] - }, - 'rollAngle': -7.1766233, - 'blurredLikelihood': 'VERY_UNLIKELY', - 
'fdBoundingPoly': { - 'vertices': [{ - 'y': 485, - 'x': 1559 - }, { - 'y': 485, - 'x': 1920 - }, { - 'y': 846, - 'x': 1920 - }, { - 'y': 846, - 'x': 1559 - }] - } - }, { - 'headwearLikelihood': 'VERY_UNLIKELY', - 'panAngle': 8.7634687, - 'underExposedLikelihood': 'VERY_UNLIKELY', - 'landmarkingConfidence': 0.45491594, - 'detectionConfidence': 0.98870116, - 'joyLikelihood': 'VERY_LIKELY', - 'landmarks': [{ - 'position': { - 'y': 678.57886, - 'x': 397.22269, - 'z': 0.00052442803 - }, - 'type': 'LEFT_EYE' - }, { - 'position': { - 'y': 671.90381, - 'x': 515.38159, - 'z': 17.843918 - }, - 'type': 'RIGHT_EYE' - }, { - 'position': { - 'y': 657.13904, - 'x': 361.41068, - 'z': 6.1270714 - }, - 'type': 'LEFT_OF_LEFT_EYEBROW' - }, { - 'position': { - 'y': 649.82916, - 'x': 432.9726, - 'z': -16.12303 - }, - 'type': 'RIGHT_OF_LEFT_EYEBROW' - }, { - 'position': { - 'y': 646.04272, - 'x': 487.78485, - 'z': -7.638854 - }, - 'type': 'LEFT_OF_RIGHT_EYEBROW' - }, { - 'position': { - 'y': 642.4032, - 'x': 549.46954, - 'z': 35.154259 - }, - 'type': 'RIGHT_OF_RIGHT_EYEBROW' - }, { - 'position': { - 'y': 672.44031, - 'x': 462.86993, - 'z': -14.413016 - }, - 'type': 'MIDPOINT_BETWEEN_EYES' - }, { - 'position': { - 'y': 736.5896, - 'x': 474.0661, - 'z': -50.206612 - }, - 'type': 'NOSE_TIP' - }, { - 'position': { - 'y': 775.34973, - 'x': 472.54224, - 'z': -25.24843 - }, - 'type': 'UPPER_LIP' - }, { - 'position': { - 'y': 820.41418, - 'x': 474.41162, - 'z': -18.226196 - }, - 'type': 'LOWER_LIP' - }, { - 'position': { - 'y': 797.35547, - 'x': 415.29095, - 'z': 0.069621459 - }, - 'type': 'MOUTH_LEFT' - }, { - 'position': { - 'y': 786.58917, - 'x': 519.26709, - 'z': 13.945135 - }, - 'type': 'MOUTH_RIGHT' - }, { - 'position': { - 'y': 798.462, - 'x': 472.48071, - 'z': -17.317541 - }, - 'type': 'MOUTH_CENTER' - }, { - 'position': { - 'y': 742.13464, - 'x': 498.90826, - 'z': -1.8338414 - }, - 'type': 'NOSE_BOTTOM_RIGHT' - }, { - 'position': { - 'y': 747.218, - 'x': 438.95078, - 'z': -11.851667 - 
}, - 'type': 'NOSE_BOTTOM_LEFT' - }, { - 'position': { - 'y': 754.20105, - 'x': 472.47375, - 'z': -24.760784 - }, - 'type': 'NOSE_BOTTOM_CENTER' - }, { - 'position': { - 'y': 672.1994, - 'x': 403.39957, - 'z': -6.9005938 - }, - 'type': 'LEFT_EYE_TOP_BOUNDARY' - }, { - 'position': { - 'y': 679.914, - 'x': 425.36029, - 'z': 4.3264537 - }, - 'type': 'LEFT_EYE_RIGHT_CORNER' - }, { - 'position': { - 'y': 687.11792, - 'x': 401.66464, - 'z': -0.79697126 - }, - 'type': 'LEFT_EYE_BOTTOM_BOUNDARY' - }, { - 'position': { - 'y': 682.9585, - 'x': 378.93005, - 'z': 7.3909378 - }, - 'type': 'LEFT_EYE_LEFT_CORNER' - }, { - 'position': { - 'y': 680.40326, - 'x': 401.7229, - 'z': -2.7444897 - }, - 'type': 'LEFT_EYE_PUPIL' - }, { - 'position': { - 'y': 663.39496, - 'x': 516.03217, - 'z': 10.454485 - }, - 'type': 'RIGHT_EYE_TOP_BOUNDARY' - }, { - 'position': { - 'y': 670.74463, - 'x': 536.45978, - 'z': 31.652559 - }, - 'type': 'RIGHT_EYE_RIGHT_CORNER' - }, { - 'position': { - 'y': 679.21289, - 'x': 517.50879, - 'z': 16.653259 - }, - 'type': 'RIGHT_EYE_BOTTOM_BOUNDARY' - }, { - 'position': { - 'y': 676.06976, - 'x': 495.27335, - 'z': 14.956539 - }, - 'type': 'RIGHT_EYE_LEFT_CORNER' - }, { - 'position': { - 'y': 671.41052, - 'x': 517.3429, - 'z': 15.007857 - }, - 'type': 'RIGHT_EYE_PUPIL' - }, { - 'position': { - 'y': 639.23633, - 'x': 396.8494, - 'z': -12.132922 - }, - 'type': 'LEFT_EYEBROW_UPPER_MIDPOINT' - }, { - 'position': { - 'y': 629.66724, - 'x': 518.96332, - 'z': 6.7055798 - }, - 'type': 'RIGHT_EYEBROW_UPPER_MIDPOINT' - }, { - 'position': { - 'y': 750.20837, - 'x': 313.60855, - 'z': 127.8474 - }, - 'type': 'LEFT_EAR_TRAGION' - }, { - 'position': { - 'y': 728.68243, - 'x': 570.95, - 'z': 166.43564 - }, - 'type': 'RIGHT_EAR_TRAGION' - }, { - 'position': { - 'y': 646.05042, - 'x': 460.94397, - 'z': -16.196959 - }, - 'type': 'FOREHEAD_GLABELLA' - }, { - 'position': { - 'y': 869.36255, - 'x': 476.69009, - 'z': -4.4716644 - }, - 'type': 'CHIN_GNATHION' - }, { - 'position': { - 'y': 
818.48083, - 'x': 340.65454, - 'z': 80.163544 - }, - 'type': 'CHIN_LEFT_GONION' - }, { - 'position': { - 'y': 800.17029, - 'x': 571.60297, - 'z': 115.88489 - }, - 'type': 'CHIN_RIGHT_GONION' - }], - 'sorrowLikelihood': 'VERY_UNLIKELY', - 'surpriseLikelihood': 'VERY_UNLIKELY', - 'tiltAngle': 2.1818738, - 'angerLikelihood': 'VERY_UNLIKELY', - 'boundingPoly': { - 'vertices': [{ - 'y': 481, - 'x': 257 - }, { - 'y': 481, - 'x': 636 - }, { - 'y': 922, - 'x': 636 - }, { - 'y': 922, - 'x': 257 - }] - }, - 'rollAngle': -4.8415074, - 'blurredLikelihood': 'VERY_UNLIKELY', - 'fdBoundingPoly': { - 'vertices': [{ - 'y': 597, - 'x': 315 - }, { - 'y': 597, - 'x': 593 - }, { - 'y': 874, - 'x': 593 - }, { - 'y': 874, - 'x': 315 - }] - } - }, { - 'headwearLikelihood': 'VERY_UNLIKELY', - 'panAngle': 13.486016, - 'underExposedLikelihood': 'VERY_UNLIKELY', - 'landmarkingConfidence': 0.22890881, - 'detectionConfidence': 0.91653949, - 'joyLikelihood': 'LIKELY', - 'landmarks': [{ - 'position': { - 'y': 549.30334, - 'x': 9.7225485, - 'z': 0.0014079071 - }, - 'type': 'LEFT_EYE' - }, { - 'position': { - 'y': 539.7489, - 'x': 128.87411, - 'z': 28.692257 - }, - 'type': 'RIGHT_EYE' - }, { - 'position': { - 'y': 523.62103, - 'x': -35.406662, - 'z': -0.67885911 - }, - 'type': 'LEFT_OF_LEFT_EYEBROW' - }, { - 'position': { - 'y': 519.99487, - 'x': 42.973644, - 'z': -18.105515 - }, - 'type': 'RIGHT_OF_LEFT_EYEBROW' - }, { - 'position': { - 'y': 514.23407, - 'x': 103.02193, - 'z': -4.1667485 - }, - 'type': 'LEFT_OF_RIGHT_EYEBROW' - }, { - 'position': { - 'y': 505.69614, - 'x': 165.63609, - 'z': 47.583176 - }, - 'type': 'RIGHT_OF_RIGHT_EYEBROW' - }, { - 'position': { - 'y': 540.9787, - 'x': 76.066139, - 'z': -11.183347 - }, - 'type': 'MIDPOINT_BETWEEN_EYES' - }, { - 'position': { - 'y': 615.48669, - 'x': 89.695564, - 'z': -41.252846 - }, - 'type': 'NOSE_TIP' - }, { - 'position': { - 'y': 658.39246, - 'x': 85.935593, - 'z': -9.70177 - }, - 'type': 'UPPER_LIP' - }, { - 'position': { - 'y': 703.04309, - 
'x': 87.266853, - 'z': 2.6370313 - }, - 'type': 'LOWER_LIP' - }, { - 'position': { - 'y': 678.54712, - 'x': 31.584759, - 'z': 12.874522 - }, - 'type': 'MOUTH_LEFT' - }, { - 'position': { - 'y': 670.44092, - 'x': 126.54009, - 'z': 35.510525 - }, - 'type': 'MOUTH_RIGHT' - }, { - 'position': { - 'y': 677.92883, - 'x': 85.152267, - 'z': 0.89151889 - }, - 'type': 'MOUTH_CENTER' - }, { - 'position': { - 'y': 618.41052, - 'x': 112.767, - 'z': 14.021111 - }, - 'type': 'NOSE_BOTTOM_RIGHT' - }, { - 'position': { - 'y': 624.28644, - 'x': 45.776546, - 'z': -2.0218573 - }, - 'type': 'NOSE_BOTTOM_LEFT' - }, { - 'position': { - 'y': 632.9657, - 'x': 84.253586, - 'z': -12.025499 - }, - 'type': 'NOSE_BOTTOM_CENTER' - }, { - 'position': { - 'y': 541.79987, - 'x': 11.081995, - 'z': -8.7047234 - }, - 'type': 'LEFT_EYE_TOP_BOUNDARY' - }, { - 'position': { - 'y': 549.57306, - 'x': 35.396069, - 'z': 6.4817863 - }, - 'type': 'LEFT_EYE_RIGHT_CORNER' - }, { - 'position': { - 'y': 557.55121, - 'x': 10.446005, - 'z': -0.37798333 - }, - 'type': 'LEFT_EYE_BOTTOM_BOUNDARY' - }, { - 'position': { - 'y': 551.75134, - 'x': -16.862394, - 'z': 5.4017038 - }, - 'type': 'LEFT_EYE_LEFT_CORNER' - }, { - 'position': { - 'y': 550.14355, - 'x': 8.5758247, - 'z': -3.3803346 - }, - 'type': 'LEFT_EYE_PUPIL' - }, { - 'position': { - 'y': 531.02594, - 'x': 131.48265, - 'z': 20.201307 - }, - 'type': 'RIGHT_EYE_TOP_BOUNDARY' - }, { - 'position': { - 'y': 536.71674, - 'x': 151.31306, - 'z': 45.753532 - }, - 'type': 'RIGHT_EYE_RIGHT_CORNER' - }, { - 'position': { - 'y': 547.00037, - 'x': 130.27722, - 'z': 28.447813 - }, - 'type': 'RIGHT_EYE_BOTTOM_BOUNDARY' - }, { - 'position': { - 'y': 542.38531, - 'x': 106.59242, - 'z': 23.77187 - }, - 'type': 'RIGHT_EYE_LEFT_CORNER' - }, { - 'position': { - 'y': 539.12781, - 'x': 132.16141, - 'z': 26.180428 - }, - 'type': 'RIGHT_EYE_PUPIL' - }, { - 'position': { - 'y': 506.64093, - 'x': 4.8589344, - 'z': -18.679537 - }, - 'type': 'LEFT_EYEBROW_UPPER_MIDPOINT' - }, { - 'position': 
{ - 'y': 494.94244, - 'x': 135.53185, - 'z': 12.703153 - }, - 'type': 'RIGHT_EYEBROW_UPPER_MIDPOINT' - }, { - 'position': { - 'y': 609.03503, - 'x': -98.89212, - 'z': 134.96341 - }, - 'type': 'LEFT_EAR_TRAGION' - }, { - 'position': { - 'y': 584.60681, - 'x': 174.55208, - 'z': 200.56409 - }, - 'type': 'RIGHT_EAR_TRAGION' - }, { - 'position': { - 'y': 514.88513, - 'x': 74.575394, - 'z': -15.91002 - }, - 'type': 'FOREHEAD_GLABELLA' - }, { - 'position': { - 'y': 755.372, - 'x': 86.603539, - 'z': 23.596317 - }, - 'type': 'CHIN_GNATHION' - }, { - 'position': { - 'y': 689.8385, - 'x': -67.949554, - 'z': 94.833694 - }, - 'type': 'CHIN_LEFT_GONION' - }, { - 'position': { - 'y': 667.89325, - 'x': 179.19363, - 'z': 154.18192 - }, - 'type': 'CHIN_RIGHT_GONION' - }], - 'sorrowLikelihood': 'VERY_UNLIKELY', - 'surpriseLikelihood': 'VERY_UNLIKELY', - 'tiltAngle': -4.1819687, - 'angerLikelihood': 'VERY_UNLIKELY', - 'boundingPoly': { - 'vertices': [{ - 'y': 322 - }, { - 'y': 322, - 'x': 252 - }, { - 'y': 800, - 'x': 252 - }, { - 'y': 800 - }] - }, - 'rollAngle': -4.1248608, - 'blurredLikelihood': 'LIKELY', - 'fdBoundingPoly': { - 'vertices': [{ - 'y': 450 - }, { - 'y': 450, - 'x': 235 - }, { - 'y': 745, - 'x': 235 - }, { - 'y': 745 - }] - } - }, { - 'headwearLikelihood': 'VERY_UNLIKELY', - 'panAngle': 4.0344138, - 'underExposedLikelihood': 'VERY_UNLIKELY', - 'landmarkingConfidence': 0.16798845, - 'detectionConfidence': 0.7605139, - 'joyLikelihood': 'VERY_LIKELY', - 'landmarks': [{ - 'position': { - 'y': 637.85211, - 'x': 676.09375, - 'z': 4.3306696e-05 - }, - 'type': 'LEFT_EYE' - }, { - 'position': { - 'y': 637.43292, - 'x': 767.7132, - 'z': 6.4413033 - }, - 'type': 'RIGHT_EYE' - }, { - 'position': { - 'y': 614.27075, - 'x': 642.07782, - 'z': 3.731837 - }, - 'type': 'LEFT_OF_LEFT_EYEBROW' - }, { - 'position': { - 'y': 617.27216, - 'x': 700.90112, - 'z': -19.774208 - }, - 'type': 'RIGHT_OF_LEFT_EYEBROW' - }, { - 'position': { - 'y': 617.15649, - 'x': 747.60974, - 'z': -16.511871 - }, 
- 'type': 'LEFT_OF_RIGHT_EYEBROW' - }, { - 'position': { - 'y': 614.018, - 'x': 802.60638, - 'z': 14.954031 - }, - 'type': 'RIGHT_OF_RIGHT_EYEBROW' - }, { - 'position': { - 'y': 638.11755, - 'x': 724.42511, - 'z': -16.930967 - }, - 'type': 'MIDPOINT_BETWEEN_EYES' - }, { - 'position': { - 'y': 696.08392, - 'x': 725.82532, - 'z': -38.252609 - }, - 'type': 'NOSE_TIP' - }, { - 'position': { - 'y': 727.826, - 'x': 724.0116, - 'z': -11.615328 - }, - 'type': 'UPPER_LIP' - }, { - 'position': { - 'y': 760.22595, - 'x': 723.30157, - 'z': -0.454926 - }, - 'type': 'LOWER_LIP' - }, { - 'position': { - 'y': 738.67548, - 'x': 684.35724, - 'z': 13.192401 - }, - 'type': 'MOUTH_LEFT' - }, { - 'position': { - 'y': 738.53015, - 'x': 759.91022, - 'z': 18.485643 - }, - 'type': 'MOUTH_RIGHT' - }, { - 'position': { - 'y': 742.42737, - 'x': 723.45239, - 'z': -2.4991846 - }, - 'type': 'MOUTH_CENTER' - }, { - 'position': { - 'y': 698.4281, - 'x': 749.50385, - 'z': 1.1831931 - }, - 'type': 'NOSE_BOTTOM_RIGHT' - }, { - 'position': { - 'y': 698.48151, - 'x': 696.923, - 'z': -2.4809308 - }, - 'type': 'NOSE_BOTTOM_LEFT' - }, { - 'position': { - 'y': 708.10651, - 'x': 724.18506, - 'z': -14.418536 - }, - 'type': 'NOSE_BOTTOM_CENTER' - }, { - 'position': { - 'y': 632.12128, - 'x': 675.22388, - 'z': -7.2390652 - }, - 'type': 'LEFT_EYE_TOP_BOUNDARY' - }, { - 'position': { - 'y': 638.59021, - 'x': 694.03516, - 'z': 1.7715795 - }, - 'type': 'LEFT_EYE_RIGHT_CORNER' - }, { - 'position': { - 'y': 644.33356, - 'x': 674.92206, - 'z': -0.037067439 - }, - 'type': 'LEFT_EYE_BOTTOM_BOUNDARY' - }, { - 'position': { - 'y': 637.16479, - 'x': 655.035, - 'z': 7.4372306 - }, - 'type': 'LEFT_EYE_LEFT_CORNER' - }, { - 'position': { - 'y': 638.18683, - 'x': 673.39447, - 'z': -2.4558623 - }, - 'type': 'LEFT_EYE_PUPIL' - }, { - 'position': { - 'y': 631.96063, - 'x': 771.31744, - 'z': -0.51439536 - }, - 'type': 'RIGHT_EYE_TOP_BOUNDARY' - }, { - 'position': { - 'y': 636.94287, - 'x': 789.29443, - 'z': 16.814001 - }, - 
'type': 'RIGHT_EYE_RIGHT_CORNER' - }, { - 'position': { - 'y': 644.21619, - 'x': 770.13458, - 'z': 6.6525826 - }, - 'type': 'RIGHT_EYE_BOTTOM_BOUNDARY' - }, { - 'position': { - 'y': 638.75732, - 'x': 752.51831, - 'z': 5.8927159 - }, - 'type': 'RIGHT_EYE_LEFT_CORNER' - }, { - 'position': { - 'y': 638.06738, - 'x': 772.04718, - 'z': 4.350193 - }, - 'type': 'RIGHT_EYE_PUPIL' - }, { - 'position': { - 'y': 604.87769, - 'x': 671.68707, - 'z': -15.778968 - }, - 'type': 'LEFT_EYEBROW_UPPER_MIDPOINT' - }, { - 'position': { - 'y': 604.71191, - 'x': 775.98663, - 'z': -8.4828024 - }, - 'type': 'RIGHT_EYEBROW_UPPER_MIDPOINT' - }, { - 'position': { - 'y': 670.40063, - 'x': 605.07721, - 'z': 119.27386 - }, - 'type': 'LEFT_EAR_TRAGION' - }, { - 'position': { - 'y': 669.99823, - 'x': 823.42841, - 'z': 134.54482 - }, - 'type': 'RIGHT_EAR_TRAGION' - }, { - 'position': { - 'y': 616.47058, - 'x': 724.54547, - 'z': -21.861612 - }, - 'type': 'FOREHEAD_GLABELLA' - }, { - 'position': { - 'y': 801.31934, - 'x': 722.071, - 'z': 18.37034 - }, - 'type': 'CHIN_GNATHION' - }, { - 'position': { - 'y': 736.57159, - 'x': 617.91388, - 'z': 88.713562 - }, - 'type': 'CHIN_LEFT_GONION' - }, { - 'position': { - 'y': 736.21118, - 'x': 815.234, - 'z': 102.52047 - }, - 'type': 'CHIN_RIGHT_GONION' - }], - 'sorrowLikelihood': 'VERY_UNLIKELY', - 'surpriseLikelihood': 'VERY_UNLIKELY', - 'tiltAngle': -7.0173812, - 'angerLikelihood': 'VERY_UNLIKELY', - 'boundingPoly': { - 'vertices': [{ - 'y': 459, - 'x': 557 - }, { - 'y': 459, - 'x': 875 - }, { - 'y': 829, - 'x': 875 - }, { - 'y': 829, - 'x': 557 - }] - }, - 'rollAngle': 0.38634345, - 'blurredLikelihood': 'LIKELY', - 'fdBoundingPoly': { - 'vertices': [{ - 'y': 570, - 'x': 612 - }, { - 'y': 570, - 'x': 837 - }, { - 'y': 795, - 'x': 837 - }, { - 'y': 795, - 'x': 612 - }] - } - }] - }] -} - - -MULTIPLE_RESPONSE = { - 'responses': [ - { - 'labelAnnotations': [ - { - 'mid': '/m/0k4j', - 'description': 'automobile', - 'score': 0.9776855 - }, - { - 'mid': '/m/07yv9', 
- 'description': 'vehicle', - 'score': 0.947987 - }, - { - 'mid': '/m/07r04', - 'description': 'truck', - 'score': 0.88429511 - }, - ], - }, - { - 'safeSearchAnnotation': { - 'adult': 'VERY_UNLIKELY', - 'spoof': 'UNLIKELY', - 'medical': 'POSSIBLE', - 'violence': 'VERY_UNLIKELY' - }, - }, - ], -} - - -SAFE_SEARCH_DETECTION_RESPONSE = { - 'responses': [ - { - 'safeSearchAnnotation': { - 'adult': 'VERY_UNLIKELY', - 'spoof': 'UNLIKELY', - 'medical': 'POSSIBLE', - 'violence': 'VERY_UNLIKELY' - } - } - ] -} - - -TEXT_DETECTION_RESPONSE = { - 'responses': [ - { - 'textAnnotations': [ - { - 'locale': 'en', - 'description': 'Google CloudPlatform\n', - 'boundingPoly': { - 'vertices': [ - { - 'x': 129, - 'y': 694 - }, - { - 'x': 1375, - 'y': 694 - }, - { - 'x': 1375, - 'y': 835 - }, - { - 'x': 129, - 'y': 835 - } - ] - } - }, - { - 'description': 'Google', - 'boundingPoly': { - 'vertices': [ - { - 'x': 129, - 'y': 694 - }, - { - 'x': 535, - 'y': 694 - }, - { - 'x': 535, - 'y': 835 - }, - { - 'x': 129, - 'y': 835 - } - ] - } - }, - { - 'description': 'CloudPlatform', - 'boundingPoly': { - 'vertices': [ - { - 'x': 567, - 'y': 694 - }, - { - 'x': 1375, - 'y': 694 - }, - { - 'x': 1375, - 'y': 835 - }, - { - 'x': 567, - 'y': 835 - } - ] - } - } - ] - } - ] -} - - -WEB_DETECTION_RESPONSE = { - 'responses': [ - { - 'webDetection': { - 'partialMatchingImages': [{ - 'score': 0.9216, - 'url': 'https://cloud.google.com/vision' - }, { - 'score': 0.55520177, - 'url': 'https://cloud.google.com/vision' - }], - 'fullMatchingImages': [{ - 'score': 0.09591467, - 'url': 'https://cloud.google.com/vision' - }, { - 'score': 0.09591467, - 'url': 'https://cloud.google.com/vision' - }], - 'webEntities': [{ - 'entityId': '/m/019dvv', - 'score': 1470.4435, - 'description': 'Mount Rushmore National Memorial' - }, { - 'entityId': '/m/05_5t0l', - 'score': 0.9468027, - 'description': 'Landmark' - }], - 'pagesWithMatchingImages': [{ - 'score': 2.9996617, - 'url': 'https://cloud.google.com/vision' - }, { - 
'score': 1.1980441, - 'url': 'https://cloud.google.com/vision' - }] - } - } - ] -} diff --git a/vision/tests/unit/test__gax.py b/vision/tests/unit/test__gax.py deleted file mode 100644 index b2c0ea5ab4303..0000000000000 --- a/vision/tests/unit/test__gax.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import mock - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -class TestGAXClient(unittest.TestCase): - def _get_target_class(self): - from google.cloud.vision._gax import _GAPICVisionAPI - - return _GAPICVisionAPI - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_ctor(self): - client = mock.Mock( - _credentials=_make_credentials(), - spec=['_credentials'], - ) - with mock.patch('google.cloud.vision._gax.image_annotator_client.' - 'ImageAnnotatorClient'): - api = self._make_one(client) - self.assertIs(api._client, client) - - def test_gapic_credentials(self): - from google.cloud.gapic.vision.v1.image_annotator_client import ( - ImageAnnotatorClient) - - from google.cloud.vision import Client - - # Mock the GAPIC ImageAnnotatorClient, whose arguments we - # want to check. - with mock.patch.object(ImageAnnotatorClient, '__init__') as iac: - iac.return_value = None - - # Create the GAX client. 
- credentials = _make_credentials() - client = Client(credentials=credentials, project='foo') - self._make_one(client=client) - - # Assert that the GAPIC constructor was called once, and - # that the credentials were sent. - iac.assert_called_once() - _, _, kwargs = iac.mock_calls[0] - self.assertIs(kwargs['credentials'], credentials) - - def test_kwarg_lib_name(self): - from google.cloud.gapic.vision.v1.image_annotator_client import ( - ImageAnnotatorClient) - from google.cloud.vision import __version__ - from google.cloud.vision import Client - - # Mock the GAPIC ImageAnnotatorClient, whose arguments we - # want to check. - with mock.patch.object(ImageAnnotatorClient, '__init__') as iac: - iac.return_value = None - - # Create the GAX client. - client = Client(credentials=_make_credentials(), project='foo') - self._make_one(client=client) - - # Assert that the GAPIC constructor was called once, and - # that lib_name and lib_version were sent. - iac.assert_called_once() - _, _, kwargs = iac.mock_calls[0] - self.assertEqual(kwargs['lib_name'], 'gccl') - self.assertEqual(kwargs['lib_version'], __version__) - - def test_annotation(self): - from google.cloud.vision.feature import Feature - from google.cloud.vision.feature import FeatureTypes - from google.cloud.vision.image import Image - - client = mock.Mock(spec_set=['_credentials']) - feature = Feature(FeatureTypes.LABEL_DETECTION, 5) - image_content = b'abc 1 2 3' - image = Image(client, content=image_content) - with mock.patch('google.cloud.vision._gax.image_annotator_client.' 
- 'ImageAnnotatorClient'): - gax_api = self._make_one(client) - - mock_response = { - 'batch_annotate_images.return_value': - mock.Mock(responses=['mock response data']), - } - - gax_api._annotator_client = mock.Mock( - spec_set=['batch_annotate_images'], **mock_response) - - with mock.patch('google.cloud.vision._gax.Annotations') as mock_anno: - images = ((image, [feature]),) - gax_api.annotate(images) - mock_anno.from_pb.assert_called_with('mock response data') - gax_api._annotator_client.batch_annotate_images.assert_called() - - def test_annotate_no_requests(self): - client = mock.Mock(spec_set=['_credentials']) - with mock.patch('google.cloud.vision._gax.image_annotator_client.' - 'ImageAnnotatorClient'): - gax_api = self._make_one(client) - - response = gax_api.annotate() - self.assertEqual(response, []) - gax_api._annotator_client.batch_annotate_images.assert_not_called() - - def test_annotate_no_results(self): - from google.cloud.vision.feature import Feature - from google.cloud.vision.feature import FeatureTypes - from google.cloud.vision.image import Image - - client = mock.Mock(spec_set=['_credentials']) - feature = Feature(FeatureTypes.LABEL_DETECTION, 5) - image_content = b'abc 1 2 3' - image = Image(client, content=image_content) - with mock.patch('google.cloud.vision._gax.image_annotator_client.' 
- 'ImageAnnotatorClient'): - gax_api = self._make_one(client) - - mock_response = { - 'batch_annotate_images.return_value': mock.Mock(responses=[]), - } - - gax_api._annotator_client = mock.Mock( - spec_set=['batch_annotate_images'], **mock_response) - with mock.patch('google.cloud.vision._gax.Annotations'): - images = ((image, [feature]),) - response = gax_api.annotate(images) - self.assertEqual(len(response), 0) - self.assertIsInstance(response, list) - - gax_api._annotator_client.batch_annotate_images.assert_called() - - def test_annotate_multiple_results(self): - from google.cloud.proto.vision.v1 import image_annotator_pb2 - from google.cloud.vision.annotations import Annotations - from google.cloud.vision.feature import Feature - from google.cloud.vision.feature import FeatureTypes - from google.cloud.vision.image import Image - - client = mock.Mock(spec_set=['_credentials']) - feature = Feature(FeatureTypes.LABEL_DETECTION, 5) - image_content = b'abc 1 2 3' - image = Image(client, content=image_content) - with mock.patch('google.cloud.vision._gax.image_annotator_client.' 
- 'ImageAnnotatorClient'): - gax_api = self._make_one(client) - - responses = [ - image_annotator_pb2.AnnotateImageResponse(), - image_annotator_pb2.AnnotateImageResponse(), - ] - response = image_annotator_pb2.BatchAnnotateImagesResponse( - responses=responses) - - gax_api._annotator_client = mock.Mock( - spec_set=['batch_annotate_images']) - gax_api._annotator_client.batch_annotate_images.return_value = response - images = ((image, [feature]),) - responses = gax_api.annotate(images) - - self.assertEqual(len(responses), 2) - self.assertIsInstance(responses[0], Annotations) - self.assertIsInstance(responses[1], Annotations) - gax_api._annotator_client.batch_annotate_images.assert_called() - - def test_annotate_with_pb_requests_results(self): - from google.cloud.proto.vision.v1 import image_annotator_pb2 - from google.cloud.vision.annotations import Annotations - - client = mock.Mock(spec_set=['_credentials']) - - feature_type = image_annotator_pb2.Feature.CROP_HINTS - feature = image_annotator_pb2.Feature(type=feature_type, max_results=2) - - image_content = b'abc 1 2 3' - image = image_annotator_pb2.Image(content=image_content) - - aspect_ratios = [1.3333, 1.7777] - crop_hints_params = image_annotator_pb2.CropHintsParams( - aspect_ratios=aspect_ratios) - image_context = image_annotator_pb2.ImageContext( - crop_hints_params=crop_hints_params) - request = image_annotator_pb2.AnnotateImageRequest( - image=image, features=[feature], image_context=image_context) - - with mock.patch('google.cloud.vision._gax.image_annotator_client.' 
- 'ImageAnnotatorClient'): - gax_api = self._make_one(client) - - responses = [ - image_annotator_pb2.AnnotateImageResponse(), - image_annotator_pb2.AnnotateImageResponse(), - ] - response = image_annotator_pb2.BatchAnnotateImagesResponse( - responses=responses) - - gax_api._annotator_client = mock.Mock( - spec_set=['batch_annotate_images']) - gax_api._annotator_client.batch_annotate_images.return_value = response - responses = gax_api.annotate(requests_pb=[request]) - - self.assertEqual(len(responses), 2) - for annotation in responses: - self.assertIsInstance(annotation, Annotations) - gax_api._annotator_client.batch_annotate_images.assert_called() - - -class Test__to_gapic_feature(unittest.TestCase): - def _call_fut(self, feature): - from google.cloud.vision._gax import _to_gapic_feature - return _to_gapic_feature(feature) - - def test__to_gapic_feature(self): - from google.cloud.vision.feature import Feature - from google.cloud.vision.feature import FeatureTypes - from google.cloud.proto.vision.v1 import image_annotator_pb2 - - feature = Feature(FeatureTypes.LABEL_DETECTION, 5) - feature_pb = self._call_fut(feature) - self.assertIsInstance(feature_pb, image_annotator_pb2.Feature) - self.assertEqual(feature_pb.type, 4) - self.assertEqual(feature_pb.max_results, 5) - - -class Test__to_gapic_image(unittest.TestCase): - def _call_fut(self, image): - from google.cloud.vision._gax import _to_gapic_image - - return _to_gapic_image(image) - - def test__to_gapic_image_content(self): - from google.cloud.vision.image import Image - from google.cloud.proto.vision.v1 import image_annotator_pb2 - - image_content = b'abc 1 2 3' - client = object() - image = Image(client, content=image_content) - image_pb = self._call_fut(image) - self.assertIsInstance(image_pb, image_annotator_pb2.Image) - self.assertEqual(image_pb.content, image_content) - - def test__to_gapic_gcs_image_uri(self): - from google.cloud.vision.image import Image - from google.cloud.proto.vision.v1 import 
image_annotator_pb2 - - image_uri = 'gs://1234/34.jpg' - client = object() - image = Image(client, source_uri=image_uri) - image_pb = self._call_fut(image) - self.assertIsInstance(image_pb, image_annotator_pb2.Image) - self.assertEqual(image_pb.source.gcs_image_uri, image_uri) - - def test__to_gapic_image_uri(self): - from google.cloud.vision.image import Image - from google.cloud.proto.vision.v1 import image_annotator_pb2 - - image_uri = 'http://1234/34.jpg' - client = object() - image = Image(client, source_uri=image_uri) - image_pb = self._call_fut(image) - self.assertIsInstance(image_pb, image_annotator_pb2.Image) - self.assertEqual(image_pb.source.image_uri, image_uri) - - def test__to_gapic_invalid_image_uri(self): - from google.cloud.vision.image import Image - - image_uri = 'ftp://1234/34.jpg' - client = object() - image = Image(client, source_uri=image_uri) - with self.assertRaises(ValueError): - self._call_fut(image) - - def test__to_gapic_with_empty_image(self): - image = mock.Mock( - content=None, source=None, spec=['content', 'source']) - with self.assertRaises(ValueError): - self._call_fut(image) diff --git a/vision/tests/unit/test__http.py b/vision/tests/unit/test__http.py deleted file mode 100644 index ee486e409b8a0..0000000000000 --- a/vision/tests/unit/test__http.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import base64 -import unittest - -import mock - - -IMAGE_CONTENT = b'/9j/4QNURXhpZgAASUkq' -PROJECT = 'PROJECT' -B64_IMAGE_CONTENT = base64.b64encode(IMAGE_CONTENT).decode('ascii') - - -class TestConnection(unittest.TestCase): - - @staticmethod - def _get_target_class(): - from google.cloud.vision._http import Connection - - return Connection - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_default_url(self): - client = object() - conn = self._make_one(client) - self.assertEqual(conn._client, client) - - def test_extra_headers(self): - from google.cloud import _http as base_http - from google.cloud.vision import _http as MUT - - http = mock.Mock(spec=['request']) - response = mock.Mock(status=200, spec=['status']) - data = b'brent-spiner' - http.request.return_value = response, data - client = mock.Mock(_http=http, spec=['_http']) - - conn = self._make_one(client) - req_data = 'req-data-boring' - result = conn.api_request( - 'GET', '/rainbow', data=req_data, expect_json=False) - self.assertEqual(result, data) - - expected_headers = { - 'Content-Length': str(len(req_data)), - 'Accept-Encoding': 'gzip', - base_http.CLIENT_INFO_HEADER: MUT._CLIENT_INFO, - 'User-Agent': conn.USER_AGENT, - } - expected_uri = conn.build_api_url('/rainbow') - http.request.assert_called_once_with( - body=req_data, - headers=expected_headers, - method='GET', - uri=expected_uri, - ) - - -class Test_HTTPVisionAPI(unittest.TestCase): - def _get_target_class(self): - from google.cloud.vision._http import _HTTPVisionAPI - - return _HTTPVisionAPI - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_call_annotate_with_no_results(self): - from google.cloud.vision.feature import Feature - from google.cloud.vision.feature import FeatureTypes - from google.cloud.vision.image import Image - - client = mock.Mock(spec_set=['_connection']) - feature = Feature(FeatureTypes.LABEL_DETECTION, 5) - 
image_content = b'abc 1 2 3' - image = Image(client, content=image_content) - - http_api = self._make_one(client) - http_api._connection = mock.Mock(spec_set=['api_request']) - http_api._connection.api_request.return_value = {'responses': []} - images = ((image, [feature]),) - response = http_api.annotate(images) - self.assertEqual(len(response), 0) - self.assertIsInstance(response, list) - - def test_call_annotate_with_no_parameters(self): - client = mock.Mock(spec_set=['_connection']) - http_api = self._make_one(client) - http_api._connection = mock.Mock(spec_set=['api_request']) - - results = http_api.annotate() - self.assertEqual(results, []) - http_api._connection.api_request.assert_not_called() - - def test_call_annotate_with_pb_requests_results(self): - from google.cloud.proto.vision.v1 import image_annotator_pb2 - - client = mock.Mock(spec_set=['_connection']) - - feature_type = image_annotator_pb2.Feature.CROP_HINTS - feature = image_annotator_pb2.Feature(type=feature_type, max_results=2) - - image = image_annotator_pb2.Image(content=IMAGE_CONTENT) - - aspect_ratios = [1.3333, 1.7777] - crop_hints_params = image_annotator_pb2.CropHintsParams( - aspect_ratios=aspect_ratios) - image_context = image_annotator_pb2.ImageContext( - crop_hints_params=crop_hints_params) - request = image_annotator_pb2.AnnotateImageRequest( - image=image, features=[feature], image_context=image_context) - - http_api = self._make_one(client) - http_api._connection = mock.Mock(spec_set=['api_request']) - http_api._connection.api_request.return_value = {'responses': []} - - responses = http_api.annotate(requests_pb=[request]) - - # Establish that one and exactly one api_request call was made. - self.assertEqual(http_api._connection.api_request.call_count, 1) - - # Establish that the basic keyword arguments look correct. 
- call = http_api._connection.api_request.mock_calls[0] - self.assertEqual(call[2]['method'], 'POST') - self.assertEqual(call[2]['path'], '/images:annotate') - - # Establish that the responses look correct. - self.assertEqual(responses, []) - self.assertEqual(len(responses), 0) - - def test_call_annotate_with_more_than_one_result(self): - from google.cloud.vision.feature import Feature - from google.cloud.vision.feature import FeatureTypes - from google.cloud.vision.image import Image - from google.cloud.vision.likelihood import Likelihood - from tests.unit._fixtures import MULTIPLE_RESPONSE - - client = mock.Mock(spec_set=['_connection']) - feature = Feature(FeatureTypes.LABEL_DETECTION, 5) - image_content = b'abc 1 2 3' - image = Image(client, content=image_content) - - http_api = self._make_one(client) - http_api._connection = mock.Mock(spec_set=['api_request']) - http_api._connection.api_request.return_value = MULTIPLE_RESPONSE - images = ((image, [feature]),) - responses = http_api.annotate(images) - - self.assertEqual(len(responses), 2) - image_one = responses[0] - image_two = responses[1] - self.assertEqual(len(image_one.labels), 3) - self.assertIsInstance(image_one.safe_searches, tuple) - self.assertEqual(image_two.safe_searches.adult, - Likelihood.VERY_UNLIKELY) - self.assertEqual(len(image_two.labels), 0) - - -class TestVisionRequest(unittest.TestCase): - @staticmethod - def _get_target_function(): - from google.cloud.vision._http import _make_request - - return _make_request - - def _call_fut(self, *args, **kw): - return self._get_target_function()(*args, **kw) - - def test_call_vision_request(self): - from google.cloud.vision.feature import Feature - from google.cloud.vision.feature import FeatureTypes - from google.cloud.vision.image import Image - - client = object() - image = Image(client, content=IMAGE_CONTENT) - feature = Feature(feature_type=FeatureTypes.FACE_DETECTION, - max_results=3) - request = self._call_fut(image, feature) - 
self.assertEqual(request['image'].get('content'), B64_IMAGE_CONTENT) - features = request['features'] - self.assertEqual(len(features), 1) - feature = features[0] - self.assertEqual(feature['type'], FeatureTypes.FACE_DETECTION) - self.assertEqual(feature['maxResults'], 3) - - def test_call_vision_request_with_not_feature(self): - from google.cloud.vision.image import Image - - client = object() - image = Image(client, content=IMAGE_CONTENT) - with self.assertRaises(TypeError): - self._call_fut(image, 'nonsensefeature') - - def test_call_vision_request_with_list_bad_features(self): - from google.cloud.vision.image import Image - - client = object() - image = Image(client, content=IMAGE_CONTENT) - with self.assertRaises(TypeError): - self._call_fut(image, ['nonsensefeature']) diff --git a/vision/tests/unit/test_annotations.py b/vision/tests/unit/test_annotations.py deleted file mode 100644 index 89d03def13a56..0000000000000 --- a/vision/tests/unit/test_annotations.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - - -def _make_pb_entity(): - from google.cloud.proto.vision.v1 import geometry_pb2 - from google.cloud.proto.vision.v1 import image_annotator_pb2 - from google.type import latlng_pb2 - - description = 'testing 1 2 3' - locale = 'US' - mid = 'm/w/45342234' - score = 0.390625 - - entity_annotation = image_annotator_pb2.EntityAnnotation( - mid=mid, - locale=locale, - description=description, - score=score, - bounding_poly=geometry_pb2.BoundingPoly( - vertices=[ - geometry_pb2.Vertex(x=1, y=2), - ], - ), - locations=[ - image_annotator_pb2.LocationInfo( - lat_lng=latlng_pb2.LatLng(latitude=1.0, longitude=2.0), - ), - ], - ) - return entity_annotation - - -class TestAnnotations(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.annotations import Annotations - - return Annotations - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_ctor(self): - annotations = self._make_one( - faces=[True], properties=[True], labels=[True], landmarks=[True], - logos=[True], safe_searches=[True], texts=[True]) - self.assertEqual(annotations.faces, [True]) - self.assertEqual(annotations.properties, [True]) - self.assertEqual(annotations.labels, [True]) - self.assertEqual(annotations.landmarks, [True]) - self.assertEqual(annotations.logos, [True]) - self.assertEqual(annotations.safe_searches, [True]) - self.assertEqual(annotations.texts, [True]) - - def test_unsupported_http_annotation(self): - returned = { - 'responses': [ - {'someMadeUpAnnotation': None}, - ], - } - annotation = self._get_target_class().from_api_repr(returned) - self.assertIsInstance(annotation, self._get_target_class()) - - def test_from_pb(self): - from google.cloud.vision.likelihood import Likelihood - from google.cloud.vision.safe_search import SafeSearchAnnotation - from google.cloud.proto.vision.v1 import image_annotator_pb2 - - image_response = image_annotator_pb2.AnnotateImageResponse() - annotations = 
self._make_one().from_pb(image_response) - self.assertEqual(annotations.labels, []) - self.assertEqual(annotations.logos, []) - self.assertEqual(annotations.faces, []) - self.assertEqual(annotations.landmarks, []) - self.assertEqual(annotations.texts, []) - self.assertIsNone(annotations.properties) - - self.assertIsInstance(annotations.safe_searches, SafeSearchAnnotation) - safe_search = annotations.safe_searches - unknown = Likelihood.UNKNOWN - self.assertIs(safe_search.adult, unknown) - self.assertIs(safe_search.spoof, unknown) - self.assertIs(safe_search.medical, unknown) - self.assertIs(safe_search.violence, unknown) - - -class Test__make_entity_from_pb(unittest.TestCase): - def _call_fut(self, annotations): - from google.cloud.vision.annotations import _make_entity_from_pb - - return _make_entity_from_pb(annotations) - - def test_it(self): - description = 'testing 1 2 3' - locale = 'US' - mid = 'm/w/45342234' - score = 0.390625 - entity_annotation = _make_pb_entity() - entities = self._call_fut([entity_annotation]) - self.assertEqual(len(entities), 1) - entity = entities[0] - self.assertEqual(entity.description, description) - self.assertEqual(entity.mid, mid) - self.assertEqual(entity.locale, locale) - self.assertEqual(entity.score, score) - self.assertEqual(len(entity.bounds.vertices), 1) - self.assertEqual(entity.bounds.vertices[0].x_coordinate, 1) - self.assertEqual(entity.bounds.vertices[0].y_coordinate, 2) - self.assertEqual(len(entity.locations), 1) - self.assertEqual(entity.locations[0].latitude, 1.0) - self.assertEqual(entity.locations[0].longitude, 2.0) - - -class Test__make_faces_from_pb(unittest.TestCase): - def _call_fut(self, annotations): - from google.cloud.vision.annotations import _make_faces_from_pb - - return _make_faces_from_pb(annotations) - - def test_it(self): - from google.cloud.proto.vision.v1 import image_annotator_pb2 - from google.cloud.vision.face import Face - - faces_pb = [image_annotator_pb2.FaceAnnotation()] - - faces = 
self._call_fut(faces_pb) - self.assertIsInstance(faces[0], Face) - - -class Test__make_image_properties_from_pb(unittest.TestCase): - def _call_fut(self, annotations): - from google.cloud.vision.annotations import ( - _make_image_properties_from_pb) - - return _make_image_properties_from_pb(annotations) - - def test_it(self): - from google.cloud.proto.vision.v1 import image_annotator_pb2 - from google.protobuf.wrappers_pb2 import FloatValue - from google.type.color_pb2 import Color - - alpha = FloatValue(value=1.0) - color_pb = Color(red=1.0, green=2.0, blue=3.0, alpha=alpha) - color_info_pb = image_annotator_pb2.ColorInfo(color=color_pb, - score=1.0, - pixel_fraction=1.0) - dominant_colors = image_annotator_pb2.DominantColorsAnnotation( - colors=[color_info_pb]) - - image_properties_pb = image_annotator_pb2.ImageProperties( - dominant_colors=dominant_colors) - - image_properties = self._call_fut(image_properties_pb) - self.assertEqual(image_properties.colors[0].pixel_fraction, 1.0) - self.assertEqual(image_properties.colors[0].score, 1.0) - self.assertEqual(image_properties.colors[0].color.red, 1.0) - self.assertEqual(image_properties.colors[0].color.green, 2.0) - self.assertEqual(image_properties.colors[0].color.blue, 3.0) - self.assertEqual(image_properties.colors[0].color.alpha, 1.0) - - -class Test__process_image_annotations(unittest.TestCase): - def _call_fut(self, image): - from google.cloud.vision.annotations import _process_image_annotations - - return _process_image_annotations(image) - - def test_it(self): - from google.cloud.proto.vision.v1 import image_annotator_pb2 - - description = 'testing 1 2 3' - locale = 'US' - mid = 'm/w/45342234' - score = 0.390625 - entity_annotation = _make_pb_entity() - - image_response = image_annotator_pb2.AnnotateImageResponse( - label_annotations=[entity_annotation]) - - annotations = self._call_fut(image_response) - self.assertEqual(len(annotations['labels']), 1) - entity = annotations['labels'][0] - - 
self.assertEqual(entity.description, description) - self.assertEqual(entity.mid, mid) - self.assertEqual(entity.locale, locale) - self.assertEqual(entity.score, score) - self.assertEqual(len(entity.bounds.vertices), 1) - self.assertEqual(entity.bounds.vertices[0].x_coordinate, 1) - self.assertEqual(entity.bounds.vertices[0].y_coordinate, 2) - self.assertEqual(len(entity.locations), 1) - self.assertEqual(entity.locations[0].latitude, 1.0) - self.assertEqual(entity.locations[0].longitude, 2.0) diff --git a/vision/tests/unit/test_batch.py b/vision/tests/unit/test_batch.py deleted file mode 100644 index bda1148eca7ef..0000000000000 --- a/vision/tests/unit/test_batch.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import mock - -PROJECT = 'PROJECT' - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -class TestBatch(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.batch import Batch - - return Batch - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_ctor(self): - from google.cloud.vision.feature import Feature - from google.cloud.vision.feature import FeatureTypes - from google.cloud.vision.image import Image - - client = mock.Mock(spec=[]) - image = Image(client, source_uri='gs://images/imageone.jpg') - face_feature = Feature(FeatureTypes.FACE_DETECTION, 5) - logo_feature = Feature(FeatureTypes.LOGO_DETECTION, 3) - - batch = self._make_one(client) - batch.add_image(image, [logo_feature, face_feature]) - self.assertEqual(len(batch.images), 1) - self.assertEqual(len(batch.images[0]), 2) - self.assertIsInstance(batch.images[0][0], Image) - self.assertEqual(len(batch.images[0][1]), 2) - self.assertIsInstance(batch.images[0][1][0], Feature) - self.assertIsInstance(batch.images[0][1][1], Feature) - - def test_batch_from_client(self): - from google.cloud.vision.client import Client - from google.cloud.vision.feature import Feature - from google.cloud.vision.feature import FeatureTypes - - creds = _make_credentials() - client = Client(project=PROJECT, credentials=creds) - - image_one = client.image(source_uri='gs://images/imageone.jpg') - image_two = client.image(source_uri='gs://images/imagtwo.jpg') - face_feature = Feature(FeatureTypes.FACE_DETECTION, 5) - logo_feature = Feature(FeatureTypes.LOGO_DETECTION, 3) - - # Make mocks. - annotate = mock.Mock(return_value=True, spec=[]) - vision_api = mock.Mock(annotate=annotate, spec=['annotate']) - client._vision_api_internal = vision_api - - # Actually call the partially-mocked method. 
- batch = client.batch() - batch.add_image(image_one, [face_feature]) - batch.add_image(image_two, [logo_feature, face_feature]) - images = batch.images - self.assertEqual(len(images), 2) - self.assertTrue(batch.detect()) - self.assertEqual(len(batch.images), 0) - client._vision_api_internal.annotate.assert_called_with(images) diff --git a/vision/tests/unit/test_client.py b/vision/tests/unit/test_client.py deleted file mode 100644 index 45690e5f88c40..0000000000000 --- a/vision/tests/unit/test_client.py +++ /dev/null @@ -1,633 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import base64 -import unittest - -import mock - - -IMAGE_CONTENT = b'/9j/4QNURXhpZgAASUkq' -IMAGE_SOURCE = 'gs://some/image.jpg' -PROJECT = 'PROJECT' -B64_IMAGE_CONTENT = base64.b64encode(IMAGE_CONTENT).decode('ascii') - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -class TestClient(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.client import Client - - return Client - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_ctor(self): - creds = _make_credentials() - client = self._make_one(project=PROJECT, credentials=creds) - self.assertEqual(client.project, PROJECT) - - def test_annotate_with_preset_api(self): - credentials = _make_credentials() - client = self._make_one(project=PROJECT, credentials=credentials) - vision_api = client._vision_api - vision_api._connection = _Connection() - - annotate = mock.Mock(return_value=mock.sentinel.annotated, spec=[]) - api = mock.Mock(annotate=annotate, spec=['annotate']) - - client._vision_api_internal = api - client._vision_api.annotate() - annotate.assert_called_once_with() - - def test_make_gax_client(self): - from google.cloud.vision._gax import _GAPICVisionAPI - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=None) - vision_api = client._vision_api - vision_api._connection = _Connection() - with mock.patch('google.cloud.vision.client._GAPICVisionAPI', - spec=True): - self.assertIsInstance(client._vision_api, _GAPICVisionAPI) - - def test_make_http_client(self): - from google.cloud.vision._http import _HTTPVisionAPI - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - self.assertIsInstance(client._vision_api, _HTTPVisionAPI) - - def test_face_annotation(self): - from google.cloud.vision.annotations import 
Annotations - from google.cloud.vision.feature import Feature, FeatureTypes - from tests.unit._fixtures import FACE_DETECTION_RESPONSE - - returned = FACE_DETECTION_RESPONSE - request = { - "requests": [ - { - "image": { - "content": B64_IMAGE_CONTENT, - }, - "features": [ - { - "maxResults": 3, - "type": "FACE_DETECTION", - }, - ], - }, - ], - } - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - connection = _Connection(returned) - vision_api._connection = connection - - features = [ - Feature(feature_type=FeatureTypes.FACE_DETECTION, max_results=3), - ] - image = client.image(content=IMAGE_CONTENT) - images = ((image, features),) - api_response = client._vision_api.annotate(images) - - self.assertEqual(len(api_response), 1) - response = api_response[0] - self.assertEqual( - request, connection._requested[0]['data']) - self.assertIsInstance(response, Annotations) - - def test_image_with_client_gcs_source(self): - from google.cloud.vision.image import Image - - credentials = _make_credentials() - client = self._make_one(project=PROJECT, credentials=credentials) - gcs_image = client.image(source_uri=IMAGE_SOURCE) - self.assertIsInstance(gcs_image, Image) - self.assertEqual(gcs_image.source, IMAGE_SOURCE) - - def test_image_with_client_raw_content(self): - from google.cloud.vision.image import Image - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - raw_image = client.image(content=IMAGE_CONTENT) - self.assertIsInstance(raw_image, Image) - self.assertEqual(raw_image.content, IMAGE_CONTENT) - - def test_image_with_client_filename(self): - from mock import mock_open - from mock import patch - from google.cloud.vision.image import Image - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - with 
patch('google.cloud.vision.image.open', - mock_open(read_data=IMAGE_CONTENT)) as m: - file_image = client.image(filename='my_image.jpg') - m.assert_called_once_with('my_image.jpg', 'rb') - self.assertIsInstance(file_image, Image) - self.assertEqual(file_image.content, IMAGE_CONTENT) - - def test_multiple_detection_from_content(self): - import copy - from google.cloud.vision.feature import Feature - from google.cloud.vision.feature import FeatureTypes - from tests.unit._fixtures import LABEL_DETECTION_RESPONSE - from tests.unit._fixtures import LOGO_DETECTION_RESPONSE - - returned = copy.deepcopy(LABEL_DETECTION_RESPONSE) - logos = copy.deepcopy(LOGO_DETECTION_RESPONSE['responses'][0]) - returned['responses'][0]['logoAnnotations'] = logos['logoAnnotations'] - - credentials = _make_credentials() - client = self._make_one(project=PROJECT, credentials=credentials, - _use_grpc=False) - vision_api = client._vision_api - connection = _Connection(returned) - vision_api._connection = connection - - limit = 2 - label_feature = Feature(FeatureTypes.LABEL_DETECTION, limit) - logo_feature = Feature(FeatureTypes.LOGO_DETECTION, limit) - features = [label_feature, logo_feature] - image = client.image(content=IMAGE_CONTENT) - detected_items = image.detect(features) - - self.assertEqual(len(detected_items), 1) - items = detected_items[0] - self.assertEqual(len(items.logos), 2) - self.assertEqual(len(items.labels), 3) - first_logo = items.logos[0] - second_logo = items.logos[1] - self.assertEqual(first_logo.description, 'Brand1') - self.assertEqual(first_logo.score, 0.63192177) - self.assertEqual(second_logo.description, 'Brand2') - self.assertEqual(second_logo.score, 0.5492993) - - first_label = items.labels[0] - second_label = items.labels[1] - third_label = items.labels[2] - self.assertEqual(first_label.description, 'automobile') - self.assertEqual(first_label.score, 0.9776855) - self.assertEqual(second_label.description, 'vehicle') - self.assertEqual(second_label.score, 
0.947987) - self.assertEqual(third_label.description, 'truck') - self.assertEqual(third_label.score, 0.88429511) - - requested = connection._requested - requests = requested[0]['data']['requests'] - image_request = requests[0] - label_request = image_request['features'][0] - logo_request = image_request['features'][1] - - self.assertEqual(B64_IMAGE_CONTENT, - image_request['image']['content']) - self.assertEqual(label_request['maxResults'], 2) - self.assertEqual(label_request['type'], 'LABEL_DETECTION') - self.assertEqual(logo_request['maxResults'], 2) - self.assertEqual(logo_request['type'], 'LOGO_DETECTION') - - def test_detect_crop_hints_from_source(self): - from google.cloud.vision.crop_hint import CropHint - from tests.unit._fixtures import CROP_HINTS_RESPONSE - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - api = client._vision_api - api._connection = _Connection(CROP_HINTS_RESPONSE) - image = client.image(source_uri=IMAGE_SOURCE) - crop_hints = image.detect_crop_hints(aspect_ratios=[1.3333], limit=3) - - self.assertEqual(len(crop_hints), 2) - self.assertIsInstance(crop_hints[0], CropHint) - image_request = api._connection._requested[0]['data']['requests'][0] - self.assertEqual( - image_request['image']['source']['gcsImageUri'], IMAGE_SOURCE) - - crop_hints = image_request['imageContext']['cropHintsParams'] - ratios = crop_hints['aspectRatios'] - self.assertAlmostEqual(ratios[0], 1.3333, 4) - self.assertEqual(image_request['features'][0]['maxResults'], 3) - - def test_face_detection_from_source(self): - from google.cloud.vision.face import Face - from tests.unit._fixtures import FACE_DETECTION_RESPONSE - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - connection = _Connection(FACE_DETECTION_RESPONSE) - vision_api._connection = connection - - image = 
client.image(source_uri=IMAGE_SOURCE) - faces = image.detect_faces(limit=3) - self.assertEqual(len(faces), 5) - for face in faces: - self.assertIsInstance(face, Face) - - image_request = connection._requested[0]['data']['requests'][0] - self.assertEqual( - IMAGE_SOURCE, image_request['image']['source']['gcs_image_uri']) - self.assertEqual(image_request['features'][0]['maxResults'], 3) - - def test_face_detection_from_content(self): - from google.cloud.vision.face import Face - from tests.unit._fixtures import FACE_DETECTION_RESPONSE - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - connection = _Connection(FACE_DETECTION_RESPONSE) - vision_api._connection = connection - - image = client.image(content=IMAGE_CONTENT) - faces = image.detect_faces(limit=5) - self.assertEqual(len(faces), 5) - for face in faces: - self.assertIsInstance(face, Face) - - image_request = connection._requested[0]['data']['requests'][0] - self.assertEqual(B64_IMAGE_CONTENT, image_request['image']['content']) - self.assertEqual(image_request['features'][0]['maxResults'], 5) - - def test_face_detection_from_content_no_results(self): - returned = { - 'responses': [{}] - } - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - connection = _Connection(returned) - vision_api._connection = connection - - image = client.image(content=IMAGE_CONTENT) - faces = image.detect_faces(limit=5) - self.assertEqual(faces, ()) - self.assertEqual(len(faces), 0) - - image_request = connection._requested[0]['data']['requests'][0] - self.assertEqual(B64_IMAGE_CONTENT, image_request['image']['content']) - self.assertEqual(image_request['features'][0]['maxResults'], 5) - - def test_detect_full_text_annotation(self): - from google.cloud.vision.text import TextAnnotation - from tests.unit._fixtures import 
FULL_TEXT_RESPONSE - - returned = FULL_TEXT_RESPONSE - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - api = client._vision_api - api._connection = _Connection(returned) - image = client.image(source_uri=IMAGE_SOURCE) - full_text = image.detect_full_text(language_hints=['en'], limit=2) - - self.assertIsInstance(full_text, TextAnnotation) - self.assertEqual(full_text.text, 'The Republic\nBy Plato') - self.assertEqual(len(full_text.pages), 1) - self.assertEqual(len(full_text.pages), 1) - page = full_text.pages[0] - self.assertEqual(page.height, 1872) - self.assertEqual(page.width, 792) - self.assertEqual(len(page.blocks), 1) - self.assertEqual(len(page.blocks[0].paragraphs), 1) - self.assertEqual(len(page.blocks[0].paragraphs[0].words), 1) - - image_request = api._connection._requested[0]['data']['requests'][0] - self.assertEqual( - image_request['image']['source']['gcsImageUri'], IMAGE_SOURCE) - self.assertEqual( - len(image_request['imageContext']['languageHints']), 1) - self.assertEqual( - image_request['imageContext']['languageHints'][0], 'en') - self.assertEqual(image_request['features'][0]['maxResults'], 2) - self.assertEqual( - image_request['features'][0]['type'], 'DOCUMENT_TEXT_DETECTION') - - def test_label_detection_from_source(self): - from google.cloud.vision.entity import EntityAnnotation - from tests.unit._fixtures import LABEL_DETECTION_RESPONSE - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - connection = _Connection(LABEL_DETECTION_RESPONSE) - vision_api._connection = connection - - image = client.image(source_uri=IMAGE_SOURCE) - labels = image.detect_labels(limit=3) - self.assertEqual(len(labels), 3) - for label in labels: - self.assertIsInstance(label, EntityAnnotation) - image_request = connection._requested[0]['data']['requests'][0] - self.assertEqual( - 
image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) - self.assertEqual(image_request['features'][0]['maxResults'], 3) - self.assertEqual(labels[0].description, 'automobile') - self.assertEqual(labels[1].description, 'vehicle') - self.assertEqual(labels[0].mid, '/m/0k4j') - self.assertEqual(labels[1].mid, '/m/07yv9') - - def test_label_detection_no_results(self): - returned = { - 'responses': [{}] - } - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - vision_api._connection = _Connection(returned) - - image = client.image(content=IMAGE_CONTENT) - labels = image.detect_labels() - self.assertEqual(labels, ()) - self.assertEqual(len(labels), 0) - - def test_landmark_detection_from_source(self): - from google.cloud.vision.entity import EntityAnnotation - from tests.unit._fixtures import LANDMARK_DETECTION_RESPONSE - - credentials = _make_credentials() - client = self._make_one(project=PROJECT, credentials=credentials, - _use_grpc=False) - vision_api = client._vision_api - connection = _Connection(LANDMARK_DETECTION_RESPONSE) - vision_api._connection = connection - - image = client.image(source_uri=IMAGE_SOURCE) - landmarks = image.detect_landmarks(limit=3) - self.assertEqual(len(landmarks), 2) - - for landmark in landmarks: - self.assertIsInstance(landmark, EntityAnnotation) - image_request = connection._requested[0]['data']['requests'][0] - self.assertEqual( - image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) - self.assertEqual(image_request['features'][0]['maxResults'], 3) - self.assertEqual(landmarks[0].locations[0].latitude, 48.861013) - self.assertEqual(landmarks[0].locations[0].longitude, 2.335818) - self.assertEqual(landmarks[0].mid, '/m/04gdr') - self.assertEqual(landmarks[1].mid, '/m/094llg') - - def test_landmark_detection_from_content(self): - from google.cloud.vision.entity import EntityAnnotation - from 
tests.unit._fixtures import LANDMARK_DETECTION_RESPONSE - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - connection = _Connection(LANDMARK_DETECTION_RESPONSE) - vision_api._connection = connection - - image = client.image(content=IMAGE_CONTENT) - landmarks = image.detect_landmarks(limit=5) - self.assertEqual(len(landmarks), 2) - for landmark in landmarks: - self.assertIsInstance(landmark, EntityAnnotation) - image_request = connection._requested[0]['data']['requests'][0] - self.assertEqual(image_request['image']['content'], B64_IMAGE_CONTENT) - self.assertEqual(image_request['features'][0]['maxResults'], 5) - - def test_landmark_detection_no_results(self): - returned = { - 'responses': [{}] - } - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - vision_api._connection = _Connection(returned) - - image = client.image(content=IMAGE_CONTENT) - landmarks = image.detect_landmarks() - self.assertEqual(landmarks, ()) - self.assertEqual(len(landmarks), 0) - - def test_logo_detection_from_source(self): - from google.cloud.vision.entity import EntityAnnotation - from tests.unit._fixtures import LOGO_DETECTION_RESPONSE - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - connection = _Connection(LOGO_DETECTION_RESPONSE) - vision_api._connection = connection - - image = client.image(source_uri=IMAGE_SOURCE) - logos = image.detect_logos(limit=3) - self.assertEqual(len(logos), 2) - for logo in logos: - self.assertIsInstance(logo, EntityAnnotation) - image_request = connection._requested[0]['data']['requests'][0] - self.assertEqual( - image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) - 
self.assertEqual(image_request['features'][0]['maxResults'], 3) - - def test_logo_detection_from_content(self): - from google.cloud.vision.entity import EntityAnnotation - from tests.unit._fixtures import LOGO_DETECTION_RESPONSE - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - connection = _Connection(LOGO_DETECTION_RESPONSE) - vision_api._connection = connection - - image = client.image(content=IMAGE_CONTENT) - logos = image.detect_logos(limit=5) - self.assertEqual(len(logos), 2) - for logo in logos: - self.assertIsInstance(logo, EntityAnnotation) - image_request = connection._requested[0]['data']['requests'][0] - self.assertEqual(image_request['image']['content'], B64_IMAGE_CONTENT) - self.assertEqual(image_request['features'][0]['maxResults'], 5) - - def test_text_detection_from_source(self): - from google.cloud.vision.entity import EntityAnnotation - from tests.unit._fixtures import TEXT_DETECTION_RESPONSE - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - connection = _Connection(TEXT_DETECTION_RESPONSE) - vision_api._connection = connection - - image = client.image(source_uri=IMAGE_SOURCE) - text = image.detect_text(limit=3) - self.assertEqual(3, len(text)) - self.assertIsInstance(text[0], EntityAnnotation) - image_request = connection._requested[0]['data']['requests'][0] - self.assertEqual( - image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) - self.assertEqual(image_request['features'][0]['maxResults'], 3) - self.assertEqual(text[0].locale, 'en') - self.assertEqual(text[0].description, 'Google CloudPlatform\n') - self.assertEqual(text[1].description, 'Google') - self.assertEqual(text[0].bounds.vertices[0].x_coordinate, 129) - self.assertEqual(text[0].bounds.vertices[0].y_coordinate, 694) - - def 
test_safe_search_detection_from_source(self): - from google.cloud.vision.likelihood import Likelihood - from google.cloud.vision.safe_search import SafeSearchAnnotation - from tests.unit._fixtures import SAFE_SEARCH_DETECTION_RESPONSE - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - connection = _Connection(SAFE_SEARCH_DETECTION_RESPONSE) - vision_api._connection = connection - - image = client.image(source_uri=IMAGE_SOURCE) - safe_search = image.detect_safe_search() - self.assertIsInstance(safe_search, SafeSearchAnnotation) - image_request = connection._requested[0]['data']['requests'][0] - self.assertEqual( - image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) - - self.assertIs(safe_search.adult, Likelihood.VERY_UNLIKELY) - self.assertIs(safe_search.spoof, Likelihood.UNLIKELY) - self.assertIs(safe_search.medical, Likelihood.POSSIBLE) - self.assertIs(safe_search.violence, Likelihood.VERY_UNLIKELY) - - def test_safe_search_no_results(self): - returned = { - 'responses': [{}] - } - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - vision_api._connection = _Connection(returned) - - image = client.image(content=IMAGE_CONTENT) - safe_search = image.detect_safe_search() - self.assertEqual(safe_search, ()) - self.assertEqual(len(safe_search), 0) - - def test_image_properties_detection_from_source(self): - from google.cloud.vision.color import ImagePropertiesAnnotation - from tests.unit._fixtures import IMAGE_PROPERTIES_RESPONSE - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - connection = _Connection(IMAGE_PROPERTIES_RESPONSE) - vision_api._connection = connection - - image = client.image(source_uri=IMAGE_SOURCE) - 
image_properties = image.detect_properties() - self.assertIsInstance(image_properties, ImagePropertiesAnnotation) - image_request = connection._requested[0]['data']['requests'][0] - self.assertEqual( - image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) - self.assertEqual(image_properties.colors[0].score, 0.42258179) - self.assertEqual( - image_properties.colors[0].pixel_fraction, 0.025376344) - self.assertEqual(image_properties.colors[0].color.red, 253) - self.assertEqual(image_properties.colors[0].color.green, 203) - self.assertEqual(image_properties.colors[0].color.blue, 65) - self.assertEqual(image_properties.colors[0].color.alpha, 0.0) - - def test_image_properties_no_results(self): - returned = { - 'responses': [{}] - } - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - vision_api = client._vision_api - vision_api._connection = _Connection(returned) - - image = client.image(content=IMAGE_CONTENT) - image_properties = image.detect_properties() - self.assertEqual(image_properties, ()) - self.assertEqual(len(image_properties), 0) - - def test_detect_web_detection(self): - from google.cloud.vision.web import WebEntity - from google.cloud.vision.web import WebImage - from google.cloud.vision.web import WebPage - from tests.unit._fixtures import WEB_DETECTION_RESPONSE - - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials, _use_grpc=False) - api = client._vision_api - api._connection = _Connection(WEB_DETECTION_RESPONSE) - image = client.image(source_uri=IMAGE_SOURCE) - web_images = image.detect_web(limit=2) - - self.assertEqual(len(web_images.partial_matching_images), 2) - self.assertEqual(len(web_images.full_matching_images), 2) - self.assertEqual(len(web_images.web_entities), 2) - self.assertEqual(len(web_images.pages_with_matching_images), 2) - - for partial_match in web_images.partial_matching_images: - 
self.assertIsInstance(partial_match, WebImage) - - for full_match in web_images.full_matching_images: - self.assertIsInstance(full_match, WebImage) - - for web_entity in web_images.web_entities: - self.assertIsInstance(web_entity, WebEntity) - - for page in web_images.pages_with_matching_images: - self.assertIsInstance(page, WebPage) - - image_request = api._connection._requested[0]['data']['requests'][0] - self.assertEqual( - image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) - self.assertEqual(image_request['features'][0]['maxResults'], 2) - self.assertEqual( - image_request['features'][0]['type'], 'WEB_DETECTION') - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - import json - - json.dumps(kw.get('data', '')) # Simulate JSON encoding. - self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] - return response diff --git a/vision/tests/unit/test_color.py b/vision/tests/unit/test_color.py deleted file mode 100644 index 17541524dac9a..0000000000000 --- a/vision/tests/unit/test_color.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - - -class TestColor(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.color import Color - - return Color - - def test_rgb_color_data(self): - colors = { - 'red': 255, - 'green': 255, - 'blue': 255, - 'alpha': 0.5, - } - color_class = self._get_target_class() - colors = color_class.from_api_repr(colors) - - self.assertIsInstance(colors.red, float) - self.assertIsInstance(colors.green, float) - self.assertIsInstance(colors.blue, float) - self.assertIsInstance(colors.alpha, float) - self.assertEqual(colors.red, 255.0) - self.assertEqual(colors.green, 255.0) - self.assertEqual(colors.blue, 255.0) - self.assertEqual(colors.alpha, 0.5) - - def test_empty_pb_rgb_color_data(self): - from google.type.color_pb2 import Color - - color_pb = Color() - color_class = self._get_target_class() - color = color_class.from_pb(color_pb) - self.assertEqual(color.red, 0.0) - self.assertEqual(color.green, 0.0) - self.assertEqual(color.blue, 0.0) - self.assertEqual(color.alpha, 0.0) - - def test_pb_rgb_color_data(self): - from google.protobuf.wrappers_pb2 import FloatValue - from google.type.color_pb2 import Color - - alpha = FloatValue(value=1.0) - color_pb = Color(red=1.0, green=2.0, blue=3.0, alpha=alpha) - color_class = self._get_target_class() - color = color_class.from_pb(color_pb) - self.assertEqual(color.red, 1.0) - self.assertEqual(color.green, 2.0) - self.assertEqual(color.blue, 3.0) - self.assertEqual(color.alpha, 1.0) - - def test_pb_rgb_color_no_alpha_data(self): - from google.protobuf.wrappers_pb2 import FloatValue - from google.type.color_pb2 import Color - - alpha = FloatValue() - color_pb = Color(red=1.0, green=2.0, blue=3.0, alpha=alpha) - color_class = self._get_target_class() - color = color_class.from_pb(color_pb) - self.assertEqual(color.red, 1.0) - self.assertEqual(color.green, 2.0) - self.assertEqual(color.blue, 3.0) - self.assertEqual(color.alpha, 0.0) - - def test_missing_rgb_values(self): - colors = {} 
- color_class = self._get_target_class() - colors = color_class.from_api_repr(colors) - - self.assertEqual(colors.red, 0) - self.assertEqual(colors.green, 0) - self.assertEqual(colors.blue, 0) - self.assertEqual(colors.alpha, 0.0) - - -class TestImagePropertiesAnnotation(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.color import ImagePropertiesAnnotation - - return ImagePropertiesAnnotation - - def test_image_properties_annotation_from_pb(self): - from google.cloud.proto.vision.v1 import image_annotator_pb2 - from google.protobuf.wrappers_pb2 import FloatValue - from google.type.color_pb2 import Color - - alpha = FloatValue(value=1.0) - color_pb = Color(red=1.0, green=2.0, blue=3.0, alpha=alpha) - color_info_pb = image_annotator_pb2.ColorInfo( - color=color_pb, score=1.0, pixel_fraction=1.0) - dominant_colors = image_annotator_pb2.DominantColorsAnnotation( - colors=[color_info_pb]) - - image_properties_pb = image_annotator_pb2.ImageProperties( - dominant_colors=dominant_colors) - - color_info = self._get_target_class() - image_properties = color_info.from_pb(image_properties_pb) - - self.assertEqual(image_properties.colors[0].pixel_fraction, 1.0) - self.assertEqual(image_properties.colors[0].score, 1.0) - self.assertEqual(image_properties.colors[0].color.red, 1.0) - self.assertEqual(image_properties.colors[0].color.green, 2.0) - self.assertEqual(image_properties.colors[0].color.blue, 3.0) - self.assertEqual(image_properties.colors[0].color.alpha, 1.0) - - def test_empty_image_properties_annotation_from_pb(self): - from google.cloud.proto.vision.v1 import image_annotator_pb2 - - image_properties_pb = image_annotator_pb2.ImageProperties() - - color_info = self._get_target_class() - image_properties = color_info.from_pb(image_properties_pb) - self.assertIsNone(image_properties) diff --git a/vision/tests/unit/test_crop_hint.py b/vision/tests/unit/test_crop_hint.py deleted file mode 100644 index 15f10ce1eec21..0000000000000 
--- a/vision/tests/unit/test_crop_hint.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - - -class TestCropHint(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.crop_hint import CropHint - - return CropHint - - def test_crop_hint_annotation(self): - from tests.unit._fixtures import CROP_HINTS_RESPONSE - from google.cloud.vision.geometry import Bounds - - response = CROP_HINTS_RESPONSE['responses'][0]['cropHintsAnnotation'] - crop_hints_dict = response['cropHints'][0] - crop_hints_class = self._get_target_class() - crop_hints = crop_hints_class.from_api_repr(crop_hints_dict) - - self.assertIsInstance(crop_hints.bounds, Bounds) - self.assertEqual(len(crop_hints.bounds.vertices), 4) - self.assertEqual(crop_hints.confidence, 0.5) - self.assertEqual(crop_hints.importance_fraction, 1.22) - - def test_crop_hint_annotation_pb(self): - from google.cloud.proto.vision.v1 import geometry_pb2 - from google.cloud.proto.vision.v1 import image_annotator_pb2 - - vertex = geometry_pb2.Vertex(x=1, y=2) - bounds = geometry_pb2.BoundingPoly(vertices=[vertex]) - crop_hint_pb = image_annotator_pb2.CropHint( - bounding_poly=bounds, confidence=1.23, importance_fraction=4.56) - crop_hints_class = self._get_target_class() - crop_hint = crop_hints_class.from_pb(crop_hint_pb) - - self.assertEqual(len(crop_hint.bounds.vertices), 1) - vertex = crop_hint.bounds.vertices[0] - 
self.assertEqual(vertex.x_coordinate, 1) - self.assertEqual(vertex.y_coordinate, 2) - self.assertAlmostEqual(crop_hint.confidence, 1.23, 4) - self.assertAlmostEqual(crop_hint.importance_fraction, 4.56, 4) diff --git a/vision/tests/unit/test_entity.py b/vision/tests/unit/test_entity.py deleted file mode 100644 index d5b0465d31c99..0000000000000 --- a/vision/tests/unit/test_entity.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - - -class TestEntityAnnotation(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.entity import EntityAnnotation - return EntityAnnotation - - def test_logo_annotation(self): - from tests.unit._fixtures import LOGO_DETECTION_RESPONSE - - entity_class = self._get_target_class() - logo = entity_class.from_api_repr( - LOGO_DETECTION_RESPONSE['responses'][0]['logoAnnotations'][0]) - - self.assertEqual(logo.mid, '/m/05b5c') - self.assertEqual(logo.description, 'Brand1') - self.assertEqual(logo.score, 0.63192177) - self.assertEqual(logo.bounds.vertices[0].x_coordinate, 78) - self.assertEqual(logo.bounds.vertices[0].y_coordinate, 162) - - def test_logo_pb_annotation(self): - from google.cloud.proto.vision.v1 import image_annotator_pb2 - - description = 'testing 1 2 3' - locale = 'US' - mid = 'm/w/45342234' - score = 0.875 - entity_annotation = image_annotator_pb2.EntityAnnotation() - entity_annotation.mid = mid - 
entity_annotation.locale = locale - entity_annotation.description = description - entity_annotation.score = score - entity_annotation.bounding_poly.vertices.add() - entity_annotation.bounding_poly.vertices[0].x = 1 - entity_annotation.bounding_poly.vertices[0].y = 2 - entity_annotation.locations.add() - entity_annotation.locations[0].lat_lng.latitude = 1.0 - entity_annotation.locations[0].lat_lng.longitude = 2.0 - - entity_class = self._get_target_class() - entity = entity_class.from_pb(entity_annotation) - - self.assertEqual(entity.description, description) - self.assertEqual(entity.mid, mid) - self.assertEqual(entity.locale, locale) - self.assertEqual(entity.score, score) - self.assertEqual(entity.bounds.vertices[0].x_coordinate, 1) - self.assertEqual(entity.bounds.vertices[0].y_coordinate, 2) - self.assertEqual(entity.locations[0].latitude, 1.0) - self.assertEqual(entity.locations[0].longitude, 2.0) diff --git a/vision/tests/unit/test_face.py b/vision/tests/unit/test_face.py deleted file mode 100644 index 8773a00764de2..0000000000000 --- a/vision/tests/unit/test_face.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - - -class TestFace(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.face import Face - - return Face - - def _make_face_pb(self, *args, **kwargs): - from google.cloud.proto.vision.v1 import image_annotator_pb2 - - return image_annotator_pb2.FaceAnnotation(*args, **kwargs) - - def setUp(self): - from tests.unit._fixtures import FACE_DETECTION_RESPONSE - - self.face_annotations = FACE_DETECTION_RESPONSE['responses'][0] - self.face_class = self._get_target_class() - self.face = self.face_class.from_api_repr( - self.face_annotations['faceAnnotations'][0]) - - def test_face_from_pb(self): - from google.cloud.proto.vision.v1 import image_annotator_pb2 - from google.cloud.proto.vision.v1 import geometry_pb2 - - position_pb = geometry_pb2.Position(x=1.0, y=2.0, z=3.0) - landmark_pb = image_annotator_pb2.FaceAnnotation.Landmark( - position=position_pb, type=5) - face_pb = self._make_face_pb(landmarks=[landmark_pb]) - - face = self._get_target_class().from_pb(face_pb) - self.assertIsInstance(face, self._get_target_class()) - - def test_face_landmarks(self): - from google.cloud.vision.face import LandmarkTypes - - self.assertEqual(self.face.landmarking_confidence, 0.54453093) - self.assertEqual(self.face.detection_confidence, 0.9863683) - self.assertTrue(hasattr(self.face.landmarks, 'left_eye')) - left_eye = self.face.landmarks.left_eye - self.assertEqual(left_eye.position.x_coordinate, 1004.8003) - self.assertEqual(left_eye.position.y_coordinate, 482.69385) - self.assertEqual(left_eye.position.z_coordinate, 0.0016593217) - self.assertEqual(left_eye.landmark_type, LandmarkTypes.LEFT_EYE) - - def test_facial_emotions(self): - from google.cloud.vision.face import Likelihood - - self.assertEqual(self.face.joy, Likelihood.VERY_LIKELY) - self.assertEqual(self.face.sorrow, Likelihood.VERY_UNLIKELY) - self.assertEqual(self.face.surprise, Likelihood.VERY_UNLIKELY) - self.assertEqual(self.face.anger, 
Likelihood.VERY_UNLIKELY) - - def test_facial_angles(self): - self.assertEqual(self.face.angles.roll, -0.43419784) - self.assertEqual(self.face.angles.pan, 6.027647) - self.assertEqual(self.face.angles.tilt, -18.412321) - - def test_face_headware_and_blur_and_underexposed(self): - from google.cloud.vision.face import Likelihood - - very_unlikely = Likelihood.VERY_UNLIKELY - image_properties = self.face.image_properties - self.assertEqual(image_properties.blurred, very_unlikely) - self.assertEqual(image_properties.underexposed, very_unlikely) - self.assertEqual(self.face.headwear, Likelihood.VERY_UNLIKELY) - - def test_face_bounds(self): - self.assertEqual(len(self.face.bounds.vertices), 4) - vertex = self.face.bounds.vertices[0] - self.assertEqual(vertex.x_coordinate, 748) - self.assertEqual(vertex.y_coordinate, 58) - - def test_facial_skin_bounds(self): - self.assertEqual(len(self.face.fd_bounds.vertices), 4) - vertex = self.face.bounds.vertices[1] - self.assertEqual(vertex.x_coordinate, 1430) - self.assertEqual(vertex.y_coordinate, 58) diff --git a/vision/tests/unit/test_feature.py b/vision/tests/unit/test_feature.py deleted file mode 100644 index 322b5c6f52ac2..0000000000000 --- a/vision/tests/unit/test_feature.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - - -class TestFeature(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.feature import Feature - - return Feature - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_construct_feature(self): - from google.cloud.vision.feature import FeatureTypes - - feature = self._make_one(FeatureTypes.LABEL_DETECTION) - self.assertEqual(feature.max_results, 1) - self.assertEqual(feature.feature_type, 'LABEL_DETECTION') - - feature = self._make_one(FeatureTypes.FACE_DETECTION, 3) - self.assertEqual(feature.max_results, 3) - self.assertEqual(feature.feature_type, 'FACE_DETECTION') - - def test_feature_as_dict(self): - from google.cloud.vision.feature import FeatureTypes - - feature = self._make_one(FeatureTypes.FACE_DETECTION, max_results=5) - expected = { - 'type': 'FACE_DETECTION', - 'maxResults': 5 - } - self.assertEqual(feature.as_dict(), expected) - - def test_bad_feature_type(self): - with self.assertRaises(AttributeError): - self._make_one('something_not_feature_type', - max_results=5) diff --git a/vision/tests/unit/test_geometry.py b/vision/tests/unit/test_geometry.py deleted file mode 100644 index 6b4e8c50e0d30..0000000000000 --- a/vision/tests/unit/test_geometry.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - - -class TestVertex(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.geometry import Vertex - - return Vertex - - def _make_one(self, x_coordinate, y_coordinate): - return self._get_target_class()(x_coordinate, y_coordinate) - - def test_vertex_with_zeros(self): - vertex = self._make_one(0.0, 0.0) - self.assertEqual(vertex.x_coordinate, 0.0) - self.assertEqual(vertex.y_coordinate, 0.0) diff --git a/vision/tests/unit/test_image.py b/vision/tests/unit/test_image.py deleted file mode 100644 index 2435bc39ac4bd..0000000000000 --- a/vision/tests/unit/test_image.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import base64 -import unittest - -from google.cloud._helpers import _to_bytes -from google.cloud._helpers import _bytes_to_unicode - -IMAGE_SOURCE = 'gs://some/image.jpg' -IMAGE_CONTENT = _to_bytes('/9j/4QNURXhpZgAASUkq') -B64_IMAGE_CONTENT = _bytes_to_unicode(base64.b64encode(IMAGE_CONTENT)) -CLIENT_MOCK = {'source': ''} - - -class TestVisionImage(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.image import Image - - return Image - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_must_set_one_source(self): - with self.assertRaises(ValueError): - self._make_one(CLIENT_MOCK) - - with self.assertRaises(ValueError): - self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT, - source_uri=IMAGE_SOURCE) - - with self.assertRaises(ValueError): - self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT, - source_uri=IMAGE_SOURCE, filename='myimage.jpg') - - image = self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT) - self.assertEqual(image.content, IMAGE_CONTENT) - - def test_image_source_type_content(self): - image = self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT) - - as_dict = { - 'content': B64_IMAGE_CONTENT, - } - - self.assertEqual(image.content, IMAGE_CONTENT) - self.assertIsNone(image.source) - self.assertEqual(image.as_dict(), as_dict) - - def test_image_source_type_google_cloud_storage(self): - image = self._make_one(CLIENT_MOCK, source_uri=IMAGE_SOURCE) - - as_dict = { - 'source': { - 'gcs_image_uri': IMAGE_SOURCE, - } - } - - self.assertEqual(IMAGE_SOURCE, image.source) - self.assertEqual(None, image.content) - self.assertEqual(image.as_dict(), as_dict) - - def test_image_source_type_image_url(self): - url = 'http://www.example.com/image.jpg' - image = self._make_one(CLIENT_MOCK, source_uri=url) - as_dict = { - 'source': { - 'image_uri': url, - }, - } - - self.assertEqual(image.source, url) - self.assertIsNone(image.content) - self.assertEqual(image.as_dict(), as_dict) - - def 
test_image_no_valid_image_data(self): - image = self._make_one(CLIENT_MOCK, source_uri='ftp://notsupported') - with self.assertRaises(ValueError): - image.as_dict() - - def test_cannot_set_both_source_and_content(self): - image = self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT) - - self.assertEqual(image.content, IMAGE_CONTENT) - with self.assertRaises(AttributeError): - image.source = IMAGE_SOURCE - - image = self._make_one(CLIENT_MOCK, source_uri=IMAGE_SOURCE) - self.assertEqual(IMAGE_SOURCE, image.source) - with self.assertRaises(AttributeError): - image.content = IMAGE_CONTENT - - def test_image_from_filename(self): - from mock import mock_open - from mock import patch - - as_dict = { - 'content': B64_IMAGE_CONTENT, - } - - with patch('google.cloud.vision.image.open', - mock_open(read_data=IMAGE_CONTENT)) as m: - image = self._make_one(CLIENT_MOCK, filename='my-image-file.jpg') - m.assert_called_once_with('my-image-file.jpg', 'rb') - self.assertEqual(image.content, IMAGE_CONTENT) - self.assertEqual(image.as_dict(), as_dict) diff --git a/vision/tests/unit/test_safe_search.py b/vision/tests/unit/test_safe_search.py deleted file mode 100644 index 4d6d2882cb981..0000000000000 --- a/vision/tests/unit/test_safe_search.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - - -class TestSafeSearchAnnotation(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.safe_search import SafeSearchAnnotation - - return SafeSearchAnnotation - - def test_safe_search_annotation(self): - from google.cloud.vision.likelihood import Likelihood - from tests.unit._fixtures import SAFE_SEARCH_DETECTION_RESPONSE - - response = SAFE_SEARCH_DETECTION_RESPONSE['responses'][0] - safe_search_response = response['safeSearchAnnotation'] - - safe_search = self._get_target_class().from_api_repr( - safe_search_response) - - self.assertIs(safe_search.adult, Likelihood.VERY_UNLIKELY) - self.assertIs(safe_search.spoof, Likelihood.UNLIKELY) - self.assertIs(safe_search.medical, Likelihood.POSSIBLE) - self.assertIs(safe_search.violence, Likelihood.VERY_UNLIKELY) - - def test_pb_safe_search_annotation(self): - from google.cloud.vision.likelihood import Likelihood - from google.cloud.proto.vision.v1.image_annotator_pb2 import ( - Likelihood as LikelihoodPB) - from google.cloud.proto.vision.v1 import image_annotator_pb2 - - possible = LikelihoodPB.Value('POSSIBLE') - possible_name = Likelihood.POSSIBLE - safe_search_annotation = image_annotator_pb2.SafeSearchAnnotation( - adult=possible, spoof=possible, medical=possible, violence=possible - ) - - safe_search = self._get_target_class().from_pb(safe_search_annotation) - - self.assertIs(safe_search.adult, possible_name) - self.assertIs(safe_search.spoof, possible_name) - self.assertIs(safe_search.medical, possible_name) - self.assertIs(safe_search.violence, possible_name) - - def test_empty_pb_safe_search_annotation(self): - from google.cloud.vision.likelihood import Likelihood - from google.cloud.proto.vision.v1 import image_annotator_pb2 - - unknown = Likelihood.UNKNOWN - safe_search_annotation = image_annotator_pb2.SafeSearchAnnotation() - - safe_search = self._get_target_class().from_pb(safe_search_annotation) - - self.assertIs(safe_search.adult, unknown) - 
self.assertIs(safe_search.spoof, unknown) - self.assertIs(safe_search.medical, unknown) - self.assertIs(safe_search.violence, unknown) diff --git a/vision/tests/unit/test_text.py b/vision/tests/unit/test_text.py deleted file mode 100644 index 3b5df496d2992..0000000000000 --- a/vision/tests/unit/test_text.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - - -class TestTextAnnotatin(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.text import TextAnnotation - return TextAnnotation - - def test_text_annotation_from_api_repr(self): - annotation = { - 'pages': [], - 'text': 'some detected text', - } - text_annotation = self._get_target_class().from_api_repr(annotation) - self.assertIsInstance(text_annotation, self._get_target_class()) - self.assertEqual(len(text_annotation.pages), 0) - self.assertEqual(text_annotation.text, annotation['text']) - - def test_text_annotation_from_pb(self): - from google.cloud.proto.vision.v1 import text_annotation_pb2 - - page = text_annotation_pb2.Page(width=8, height=11) - text = 'some detected text' - text_annotation_pb = text_annotation_pb2.TextAnnotation( - pages=[page], text=text) - - text_annotation = self._get_target_class().from_pb(text_annotation_pb) - self.assertIsInstance(text_annotation, self._get_target_class()) - self.assertEqual(len(text_annotation.pages), 1) - 
self.assertEqual(text_annotation.pages[0].width, 8) - self.assertEqual(text_annotation.pages[0].height, 11) - self.assertEqual(text_annotation.text, text) diff --git a/vision/tests/unit/test_web.py b/vision/tests/unit/test_web.py deleted file mode 100644 index 9f91d883b1b95..0000000000000 --- a/vision/tests/unit/test_web.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - - -class TestWebDetection(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.web import WebDetection - return WebDetection - - def _make_one(self, web_entities, full_matching_images, - partial_matching_images, pages_with_matching_images): - return self._get_target_class()(web_entities, full_matching_images, - partial_matching_images, - pages_with_matching_images) - - def test_web_detection_ctor(self): - web_detection = self._make_one(1, 2, 3, 4) - self.assertEqual(web_detection.web_entities, 1) - self.assertEqual(web_detection.full_matching_images, 2) - self.assertEqual(web_detection.partial_matching_images, 3) - self.assertEqual(web_detection.pages_with_matching_images, 4) - - def test_web_detection_from_api_repr(self): - from google.cloud.vision.web import WebEntity - from google.cloud.vision.web import WebImage - from google.cloud.vision.web import WebPage - - web_detection_dict = { - 'partialMatchingImages': [{ - 'url': 'https://cloud.google.com/vision', - 'score': 0.92234, - 
}], - 'fullMatchingImages': [{ - 'url': 'https://cloud.google.com/vision', - 'score': 0.92234, - }], - 'webEntities': [{ - 'entityId': '/m/05_5t0l', - 'score': 0.9468027, - 'description': 'Landmark' - }], - 'pagesWithMatchingImages': [{ - 'url': 'https://cloud.google.com/vision', - 'score': 0.92234, - }], - } - web_detection = self._get_target_class().from_api_repr( - web_detection_dict) - self.assertEqual(len(web_detection.partial_matching_images), 1) - self.assertEqual(len(web_detection.full_matching_images), 1) - self.assertEqual(len(web_detection.web_entities), 1) - self.assertEqual(len(web_detection.pages_with_matching_images), 1) - - self.assertIsInstance(web_detection.partial_matching_images[0], - WebImage) - self.assertIsInstance(web_detection.full_matching_images[0], WebImage) - self.assertIsInstance(web_detection.web_entities[0], WebEntity) - self.assertIsInstance(web_detection.pages_with_matching_images[0], - WebPage) - - def test_web_detection_from_pb(self): - from google.cloud.proto.vision.v1 import web_detection_pb2 - from google.cloud.vision.web import WebEntity - from google.cloud.vision.web import WebImage - from google.cloud.vision.web import WebPage - - description = 'Some images like the image you have.' 
- entity_id = '/m/019dvv' - score = 1470.4435 - url = 'http://cloud.google.com/vision' - - web_entity_pb = web_detection_pb2.WebDetection.WebEntity( - entity_id=entity_id, score=score, description=description) - - web_image_pb = web_detection_pb2.WebDetection.WebImage( - url=url, score=score) - - web_page_pb = web_detection_pb2.WebDetection.WebPage( - url=url, score=score) - - web_detection_pb = web_detection_pb2.WebDetection( - web_entities=[web_entity_pb], full_matching_images=[web_image_pb], - partial_matching_images=[web_image_pb], - pages_with_matching_images=[web_page_pb]) - web_detection = self._get_target_class().from_pb(web_detection_pb) - self.assertEqual(len(web_detection.web_entities), 1) - self.assertEqual(len(web_detection.full_matching_images), 1) - self.assertEqual(len(web_detection.partial_matching_images), 1) - self.assertEqual(len(web_detection.pages_with_matching_images), 1) - self.assertIsInstance(web_detection.web_entities[0], WebEntity) - self.assertIsInstance(web_detection.full_matching_images[0], WebImage) - self.assertIsInstance(web_detection.partial_matching_images[0], - WebImage) - self.assertIsInstance(web_detection.pages_with_matching_images[0], - WebPage) - - -class TestWebEntity(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.web import WebEntity - return WebEntity - - def _make_one(self, entity_id, score, description): - return self._get_target_class()(entity_id, score, description) - - def test_web_entity_ctor(self): - entity_id = 'm/abc123' - score = 0.13245 - description = 'This is an image from the web that matches your image.' 
- web_entity = self._make_one(entity_id, score, description) - self.assertEqual(web_entity.entity_id, entity_id) - self.assertAlmostEqual(web_entity.score, score, 4) - self.assertEqual(web_entity.description, description) - - def test_web_entity_from_api_repr(self): - entity_dict = { - 'entityId': '/m/019dvv', - 'score': 1470.4435, - 'description': 'Mount Rushmore National Memorial', - } - web_entity = self._get_target_class().from_api_repr(entity_dict) - - self.assertEqual(web_entity.entity_id, entity_dict['entityId']) - self.assertAlmostEqual(web_entity.score, entity_dict['score'], 4) - self.assertEqual(web_entity.description, entity_dict['description']) - - def test_web_entity_from_pb(self): - from google.cloud.proto.vision.v1 import web_detection_pb2 - - entity_id = '/m/019dvv' - score = 1470.4435 - description = 'Some images like the image you have.' - web_entity_pb = web_detection_pb2.WebDetection.WebEntity( - entity_id=entity_id, score=score, description=description) - web_entity = self._get_target_class().from_pb(web_entity_pb) - self.assertEqual(web_entity.entity_id, entity_id) - self.assertAlmostEqual(web_entity.score, score, 4) - self.assertEqual(web_entity.description, description) - - -class TestWebImage(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.web import WebImage - return WebImage - - def _make_one(self, url, score): - return self._get_target_class()(url, score) - - def test_web_image_ctor(self): - url = 'http://cloud.google.com/vision' - score = 1234.23 - web_image = self._make_one(url, score) - self.assertEqual(web_image.url, url) - self.assertAlmostEqual(web_image.score, score, 4) - - def test_web_image_from_api_repr(self): - web_image_dict = { - 'url': 'http://cloud.google.com/vision', - 'score': 1234.23, - } - web_image = self._get_target_class().from_api_repr(web_image_dict) - self.assertEqual(web_image.url, web_image_dict['url']) - self.assertAlmostEqual(web_image.score, web_image_dict['score']) 
- - def test_web_image_from_pb(self): - from google.cloud.proto.vision.v1 import web_detection_pb2 - - url = 'http://cloud.google.com/vision' - score = 1234.23 - web_image_pb = web_detection_pb2.WebDetection.WebImage( - url=url, score=score) - web_image = self._get_target_class().from_pb(web_image_pb) - self.assertEqual(web_image.url, url) - self.assertAlmostEqual(web_image.score, score, 4) - - -class TestWebPage(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.vision.web import WebPage - return WebPage - - def _make_one(self, url, score): - return self._get_target_class()(url, score) - - def test_web_page_ctor(self): - url = 'http://cloud.google.com/vision' - score = 1234.23 - web_page = self._make_one(url, score) - self.assertEqual(web_page.url, url) - self.assertAlmostEqual(web_page.score, score, 4) - - def test_web_page_from_api_repr(self): - web_page_dict = { - 'url': 'http://cloud.google.com/vision', - 'score': 1234.23, - } - web_page = self._get_target_class().from_api_repr(web_page_dict) - self.assertEqual(web_page.url, web_page_dict['url']) - self.assertAlmostEqual(web_page.score, web_page_dict['score'], 4) - - def test_web_page_from_pb(self): - from google.cloud.proto.vision.v1 import web_detection_pb2 - - url = 'http://cloud.google.com/vision' - score = 1234.23 - web_page_pb = web_detection_pb2.WebDetection.WebPage( - url=url, score=score) - web_page = self._get_target_class().from_pb(web_page_pb) - self.assertEqual(web_page.url, url) - self.assertAlmostEqual(web_page.score, score, 4) diff --git a/vision/tests/vision/test_decorators.py b/vision/tests/vision/test_decorators.py new file mode 100644 index 0000000000000..0a1a4481f432e --- /dev/null +++ b/vision/tests/vision/test_decorators.py @@ -0,0 +1,66 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +import unittest + +import mock + +from google.cloud import vision + + +class DecoratorTests(unittest.TestCase): + def test_noop_without_enums(self): + class A(object): + pass + APrime = vision.decorators.add_single_feature_methods(A) + + # It should be the same class object. + assert A is APrime + + # Nothing should have been added. + assert not hasattr(A, 'face_detection') + assert not hasattr(A, 'logo_detection') + + def test_with_enums(self): + class A(object): + enums = vision.enums + + # There should not be detection methods yet. + assert not hasattr(A, 'face_detection') + + # Add the detection methods. + APrime = vision.decorators.add_single_feature_methods(A) + assert A is APrime + + # There should be detection methods now. + assert hasattr(A, 'face_detection') + assert callable(A.face_detection) + + +class SingleFeatureMethodTests(unittest.TestCase): + @mock.patch.object(vision.ImageAnnotatorClient, 'annotate_image') + def test_runs_generic_single_image(self, ai): + ai.return_value = vision.image_annotator.AnnotateImageResponse() + + # Make a face detection request. + client = vision.ImageAnnotatorClient() + image = {'source': {'image_uri': 'gs://my-test-bucket/image.jpg'}} + response = client.face_detection(image) + + # Assert that the single-image method was called as expected. 
+ ai.assert_called_once_with({ + 'features': [vision.enums.Feature.Type.FACE_DETECTION], + 'image': image, + }, options=None) diff --git a/vision/tests/vision/test_helpers.py b/vision/tests/vision/test_helpers.py new file mode 100644 index 0000000000000..39588dd887f4d --- /dev/null +++ b/vision/tests/vision/test_helpers.py @@ -0,0 +1,89 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +import unittest + +import mock + +from google.auth.credentials import Credentials + +from google.cloud.vision_v1 import ImageAnnotatorClient +from google.cloud.vision_v1 import image_annotator + + + +class TestSingleImageHelper(unittest.TestCase): + def setUp(self): + credentials = mock.Mock(spec=Credentials) + self.client = ImageAnnotatorClient(credentials=credentials) + + @mock.patch.object(ImageAnnotatorClient, 'batch_annotate_images') + def test_all_features_default(self, batch_annotate): + # Set up an image annotation request with no features. + image = image_annotator.Image(source={ + 'image_uri': 'http://foo.com/img.jpg', + }) + request = image_annotator.AnnotateImageRequest(image=image) + assert not request.features + + # Perform the single image request. + self.client.annotate_image(request) + + # Evalute the argument sent to batch_annotate_images. 
+ assert batch_annotate.call_count == 1 + _, args, kwargs = batch_annotate.mock_calls[0] + + # Only a single request object should be sent. + assert len(args[0]) == 1 + + # Evalute the request object to ensure it looks correct. + request_sent = args[0][0] + all_features = self.client._get_all_features() + assert request_sent.image is request.image + assert len(request_sent.features) == len(all_features) + + + @mock.patch.object(ImageAnnotatorClient, 'batch_annotate_images') + def test_explicit_features(self, batch_annotate): + # Set up an image annotation request with no features. + image = image_annotator.Image(source={ + 'image_uri': 'http://foo.com/img.jpg', + }) + request = image_annotator.AnnotateImageRequest( + image=image, + features=[ + image_annotator.Feature(type=1), + image_annotator.Feature(type=2), + image_annotator.Feature(type=3), + ], + ) + + # Perform the single image request. + self.client.annotate_image(request) + + # Evalute the argument sent to batch_annotate_images. + assert batch_annotate.call_count == 1 + _, args, kwargs = batch_annotate.mock_calls[0] + + # Only a single request object should be sent. + assert len(args[0]) == 1 + + # Evalute the request object to ensure it looks correct. + request_sent = args[0][0] + assert request_sent.image is request.image + assert len(request_sent.features) == 3 + for feature, i in zip(request_sent.features, range(1, 4)): + assert feature.type == i + assert feature.max_results == 0