diff --git a/vision/google/cloud/vision/__init__.py b/vision/google/cloud/vision/__init__.py index 31a04df4b910..9867337983cf 100644 --- a/vision/google/cloud/vision/__init__.py +++ b/vision/google/cloud/vision/__init__.py @@ -12,6 +12,40 @@ # See the License for the specific language governing permissions and # limitations under the License. +# ----------------------------------------------------------------------------- +# TRANSITION CODE +# ----------------------------------------------------------------------------- +# The old Vision manual layer is now deprecated, but to allow +# users the time to move from the manual layer to the mostly auto-generated +# layer, they are both living side by side for a few months. +# +# Instantiating the old manual layer (`google.cloud.vision.Client`) will +# issue a DeprecationWarning. +# +# When it comes time to remove the old layer, everything in this directory +# should go away EXCEPT __init__.py, decorators.py, and helpers.py. +# Additionally, the import and export of `Client` should be removed from this +# file (along with this note), and the rest should be left intact. +# ----------------------------------------------------------------------------- + from __future__ import absolute_import +from pkg_resources import get_distribution +__version__ = get_distribution('google-cloud-vision').version + +from google.cloud.vision.client import Client from google.cloud.vision_v1 import * + + +__all__ = ( + # Common + '__version__', + + # Manual Layer + 'Client', + + # GAPIC & Partial Manual Layer + 'enums', + 'ImageAnnotatorClient', + 'types', +) diff --git a/vision/google/cloud/vision/_gax.py b/vision/google/cloud/vision/_gax.py new file mode 100644 index 000000000000..44a55e0f09e5 --- /dev/null +++ b/vision/google/cloud/vision/_gax.py @@ -0,0 +1,114 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GAX Client for interacting with the Google Cloud Vision API.""" + +from google.cloud.gapic.vision.v1 import image_annotator_client +from google.cloud.proto.vision.v1 import image_annotator_pb2 + +from google.cloud.vision import __version__ +from google.cloud.vision.annotations import Annotations + + +class _GAPICVisionAPI(object): + """Vision API for interacting with the gRPC version of Vision. + + :type client: :class:`~google.cloud.vision.client.Client` + :param client: Instance of ``Client`` object. + """ + def __init__(self, client=None): + self._client = client + self._annotator_client = image_annotator_client.ImageAnnotatorClient( + credentials=client._credentials, lib_name='gccl', + lib_version=__version__) + + def annotate(self, images=None, requests_pb=None): + """Annotate images through GAX. + + :type images: list + :param images: List containing pairs of + :class:`~google.cloud.vision.image.Image` and + :class:`~google.cloud.vision.feature.Feature`. + e.g. [(image, [feature_one, feature_two]),] + + :type requests_pb: list + :param requests_pb: List of :class:`google.cloud.proto.vision.v1.\ + image_annotator_pb2.AnnotateImageRequest` + + :rtype: list + :returns: List of + :class:`~google.cloud.vision.annotations.Annotations`. 
+ """ + if any([images, requests_pb]) is False: + return [] + + if requests_pb is None: + requests = [] + for image, features in images: + gapic_features = [_to_gapic_feature(feature) + for feature in features] + gapic_image = _to_gapic_image(image) + request = image_annotator_pb2.AnnotateImageRequest( + image=gapic_image, features=gapic_features) + requests.append(request) + else: + requests = requests_pb + + annotator_client = self._annotator_client + responses = annotator_client.batch_annotate_images(requests).responses + return [Annotations.from_pb(response) for response in responses] + + +def _to_gapic_feature(feature): + """Helper function to convert a ``Feature`` to a gRPC ``Feature``. + + :type feature: :class:`~google.cloud.vision.feature.Feature` + :param feature: Local ``Feature`` class to be converted to gRPC ``Feature`` + instance. + + :rtype: :class:`~google.cloud.proto.vision.v1.image_annotator_pb2.Feature` + :returns: gRPC ``Feature`` converted from + :class:`~google.cloud.vision.feature.Feature`. + """ + return image_annotator_pb2.Feature( + type=getattr(image_annotator_pb2.Feature, feature.feature_type), + max_results=feature.max_results) + + +def _to_gapic_image(image): + """Helper function to convert an ``Image`` to a gRPC ``Image``. + + :type image: :class:`~google.cloud.vision.image.Image` + :param image: Local ``Image`` class to be converted to gRPC ``Image``. + + :rtype: :class:`~google.cloud.proto.vision.v1.image_annotator_pb2.Image` + :returns: gRPC ``Image`` converted from + :class:`~google.cloud.vision.image.Image`. 
+ """ + if image.content is not None: + return image_annotator_pb2.Image(content=image.content) + if image.source is not None: + if image.source.startswith('gs://'): + return image_annotator_pb2.Image( + source=image_annotator_pb2.ImageSource( + gcs_image_uri=image.source + ), + ) + elif image.source.startswith(('http://', 'https://')): + return image_annotator_pb2.Image( + source=image_annotator_pb2.ImageSource( + image_uri=image.source + ), + ) + raise ValueError('No image content or source found.') diff --git a/vision/google/cloud/vision/_http.py b/vision/google/cloud/vision/_http.py new file mode 100644 index 000000000000..178e2c8dc0ec --- /dev/null +++ b/vision/google/cloud/vision/_http.py @@ -0,0 +1,121 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""HTTP Client for interacting with the Google Cloud Vision API.""" + +import json + +from google.cloud import _http + +from google.cloud.vision import __version__ +from google.cloud.vision.annotations import Annotations +from google.cloud.vision.feature import Feature + +from google.protobuf import json_format + + +_CLIENT_INFO = _http.CLIENT_INFO_TEMPLATE.format(__version__) + + +class Connection(_http.JSONConnection): + """A connection to Google Cloud Vision via the JSON REST API. + + :type client: :class:`~google.cloud.vision.client.Client` + :param client: The client that owns the current connection. 
+ """ + + API_BASE_URL = 'https://vision.googleapis.com' + """The base of the API call URL.""" + + API_VERSION = 'v1' + """The version of the API, used in building the API call's URL.""" + + API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}' + """A template for the URL of a particular API call.""" + + _EXTRA_HEADERS = { + _http.CLIENT_INFO_HEADER: _CLIENT_INFO, + } + + +class _HTTPVisionAPI(object): + """Vision API for interacting with the JSON/HTTP version of Vision + + :type client: :class:`~google.cloud.core.client.Client` + :param client: Instance of ``Client`` object. + """ + + def __init__(self, client): + self._client = client + self._connection = Connection(client) + + def annotate(self, images=None, requests_pb=None): + """Annotate an image to discover it's attributes. + + :type images: list of :class:`~google.cloud.vision.image.Image` + :param images: A list of ``Image``. + + :rtype: list + :returns: List of :class:`~googe.cloud.vision.annotations.Annotations`. + + :type requests_pb: list + :param requests_pb: List of :class:`google.cloud.proto.vision.v1.\ + image_annotator_b2.AnnotateImageRequest`. + + :rtype: list + :returns: List of :class:`~googe.cloud.vision.annotations.Annotations`. + """ + if any([images, requests_pb]) is False: + return [] + + requests = [] + if requests_pb is None: + for image, features in images: + requests.append(_make_request(image, features)) + else: + requests = [json.loads(json_format.MessageToJson(request)) + for request in requests_pb] + + data = {'requests': requests} + + api_response = self._connection.api_request( + method='POST', path='/images:annotate', data=data) + responses = api_response.get('responses') + return [Annotations.from_api_repr(response) for response in responses] + + +def _make_request(image, features): + """Prepare request object to send to Vision API. + + :type image: :class:`~google.cloud.vision.image.Image` + :param image: Instance of ``Image``. 
+ + :type features: list of :class:`~google.cloud.vision.feature.Feature` + :param features: Either a list of ``Feature`` instances or a single + instance of ``Feature``. + + :rtype: dict + :returns: Dictionary prepared to send to the Vision API. + """ + if isinstance(features, Feature): + features = [features] + + feature_check = (isinstance(feature, Feature) for feature in features) + if not any(feature_check): + raise TypeError('Feature or list of Feature classes are required.') + + return { + 'image': image.as_dict(), + 'features': [feature.as_dict() for feature in features], + } diff --git a/vision/google/cloud/vision/annotations.py b/vision/google/cloud/vision/annotations.py new file mode 100644 index 000000000000..402ea01172a7 --- /dev/null +++ b/vision/google/cloud/vision/annotations.py @@ -0,0 +1,289 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# pylint: disable=too-many-arguments +"""Annotations management for Vision API responses.""" + +import six + +from google.cloud.vision.color import ImagePropertiesAnnotation +from google.cloud.vision.crop_hint import CropHint +from google.cloud.vision.entity import EntityAnnotation +from google.cloud.vision.face import Face +from google.cloud.vision.safe_search import SafeSearchAnnotation +from google.cloud.vision.text import TextAnnotation +from google.cloud.vision.web import WebDetection + + +_CROP_HINTS_ANNOTATION = 'cropHintsAnnotation' +_FACE_ANNOTATIONS = 'faceAnnotations' +_FULL_TEXT_ANNOTATION = 'fullTextAnnotation' +_IMAGE_PROPERTIES_ANNOTATION = 'imagePropertiesAnnotation' +_SAFE_SEARCH_ANNOTATION = 'safeSearchAnnotation' +_WEB_DETECTION = 'webDetection' + +_KEY_MAP = { + _CROP_HINTS_ANNOTATION: 'crop_hints', + _FACE_ANNOTATIONS: 'faces', + _FULL_TEXT_ANNOTATION: 'full_texts', + _IMAGE_PROPERTIES_ANNOTATION: 'properties', + 'labelAnnotations': 'labels', + 'landmarkAnnotations': 'landmarks', + 'logoAnnotations': 'logos', + _SAFE_SEARCH_ANNOTATION: 'safe_searches', + 'textAnnotations': 'texts', + _WEB_DETECTION: 'web', +} + + +class Annotations(object): + """Helper class to bundle annotation responses. + + :type crop_hints: list + :param crop_hints: List of + :class:`~google.cloud.vision.crop_hint.CropHintsAnnotation`. + + :type faces: list + :param faces: List of :class:`~google.cloud.vision.face.Face`. + + :type full_texts: list + :param full_texts: List of + :class:`~google.cloud.vision.text.TextAnnotation`. + + :type properties: list + :param properties: + List of :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`. + + :type labels: list + :param labels: List of + :class:`~google.cloud.vision.entity.EntityAnnotation`. + + :type landmarks: list + :param landmarks: List of + :class:`~google.cloud.vision.entity.EntityAnnotation.` + + :type logos: list + :param logos: List of + :class:`~google.cloud.vision.entity.EntityAnnotation`. 
+ + :type safe_searches: list + :param safe_searches: + List of :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation` + + :type texts: list + :param texts: List of + :class:`~google.cloud.vision.entity.EntityAnnotation`. + + :type web: list + :param web: List of :class:`~google.cloud.vision.web.WebDetection`. + """ + def __init__(self, crop_hints=(), faces=(), full_texts=(), properties=(), + labels=(), landmarks=(), logos=(), safe_searches=(), + texts=(), web=()): + self.crop_hints = crop_hints + self.faces = faces + self.full_texts = full_texts + self.properties = properties + self.labels = labels + self.landmarks = landmarks + self.logos = logos + self.safe_searches = safe_searches + self.texts = texts + self.web = web + + @classmethod + def from_api_repr(cls, response): + """Factory: construct an instance of ``Annotations`` from a response. + + :type response: dict + :param response: Vision API response object. + + :rtype: :class:`~google.cloud.vision.annotations.Annotations` + :returns: An instance of ``Annotations`` with detection types loaded. + """ + annotations = { + _KEY_MAP[feature_type]: _entity_from_response_type( + feature_type, annotation) + for feature_type, annotation in six.iteritems(response) + if feature_type in _KEY_MAP + } + return cls(**annotations) + + @classmethod + def from_pb(cls, response): + """Factory: construct an instance of ``Annotations`` from protobuf. + + :type response: :class:`~google.cloud.proto.vision.v1.\ + image_annotator_pb2.AnnotateImageResponse` + :param response: ``AnnotateImageResponse`` from protobuf call. + + :rtype: :class:`~google.cloud.vision.annotations.Annotations` + :returns: ``Annotations`` instance populated from gRPC response. + """ + annotations = _process_image_annotations(response) + return cls(**annotations) + + +def _process_image_annotations(image): + """Helper for processing annotation types from protobuf. 
+ + :type image: :class:`~google.cloud.proto.vision.v1.image_annotator_pb2.\ + AnnotateImageResponse` + :param image: ``AnnotateImageResponse`` from protobuf. + + :rtype: dict + :returns: Dictionary populated with entities from response. + """ + return { + 'crop_hints': _make_crop_hints_from_pb(image.crop_hints_annotation), + 'faces': _make_faces_from_pb(image.face_annotations), + 'full_texts': _make_full_text_from_pb(image.full_text_annotation), + 'labels': _make_entity_from_pb(image.label_annotations), + 'landmarks': _make_entity_from_pb(image.landmark_annotations), + 'logos': _make_entity_from_pb(image.logo_annotations), + 'properties': _make_image_properties_from_pb( + image.image_properties_annotation), + 'safe_searches': _make_safe_search_from_pb( + image.safe_search_annotation), + 'texts': _make_entity_from_pb(image.text_annotations), + 'web': _make_web_detection_from_pb(image.web_detection) + } + + +def _make_crop_hints_from_pb(crop_hints): + """Create list of ``CropHint`` objects from a protobuf response. + + :type crop_hints: list + :param crop_hints: List of + :class:`google.cloud.grpc.vision.v1.\ + image_annotator_pb2.CropHintsAnnotation` + + :rtype: list + :returns: List of ``CropHint`` objects. + """ + return [CropHint.from_pb(hint) for hint in crop_hints.crop_hints] + + +def _make_entity_from_pb(annotations): + """Create an entity from a protobuf response. + + :type annotations: + :class:`~google.cloud.proto.vision.v1.image_annotator_pb2.EntityAnnotation` + :param annotations: protobuf instance of ``EntityAnnotation``. + + :rtype: list + :returns: List of ``EntityAnnotation``. + """ + return [EntityAnnotation.from_pb(annotation) for annotation in annotations] + + +def _make_faces_from_pb(faces): + """Create face objects from a protobuf response. + + :type faces: + :class:`~google.cloud.proto.vision.v1.image_annotator_pb2.FaceAnnotation` + :param faces: Protobuf instance of ``FaceAnnotation``. + + :rtype: list + :returns: List of ``Face``. 
+ """ + return [Face.from_pb(face) for face in faces] + + +def _make_full_text_from_pb(full_text): + """Create text annotation object from protobuf response. + + :type full_text: :class:`~google.cloud.proto.vision.v1.\ + text_annotation_pb2.TextAnnotation` + :param full_text: Protobuf instance of ``TextAnnotation``. + + :rtype: :class:`~google.cloud.vision.text.TextAnnotation` + :returns: Instance of ``TextAnnotation``. + """ + return TextAnnotation.from_pb(full_text) + + +def _make_image_properties_from_pb(image_properties): + """Create ``ImageProperties`` object from a protobuf response. + + :type image_properties: :class:`~google.cloud.proto.vision.v1.\ + image_annotator_pb2.ImagePropertiesAnnotation` + :param image_properties: Protobuf instance of + ``ImagePropertiesAnnotation``. + + :rtype: list or ``None`` + :returns: List of ``ImageProperties`` or ``None``. + """ + return ImagePropertiesAnnotation.from_pb(image_properties) + + +def _make_safe_search_from_pb(safe_search): + """Create ``SafeSearchAnnotation`` object from a protobuf response. + + :type safe_search: :class:`~google.cloud.proto.vision.v1.\ + image_annotator_pb2.SafeSearchAnnotation` + :param safe_search: Protobuf instance of ``SafeSearchAnnotation``. + + :rtype: :class: `~google.cloud.vision.safe_search.SafeSearchAnnotation` + :returns: Instance of ``SafeSearchAnnotation``. + """ + return SafeSearchAnnotation.from_pb(safe_search) + + +def _make_web_detection_from_pb(annotation): + """Create ``WebDetection`` object from a protobuf response. + + :type annotation: :class:`~google.cloud.proto.vision.v1.web_detection_pb2\ + .WebDetection` + :param annotation: Protobuf instance of ``WebDetection``. + + :rtype: :class: `~google.cloud.vision.web.WebDetection` + :returns: Instance of ``WebDetection``. + """ + return WebDetection.from_pb(annotation) + + +def _entity_from_response_type(feature_type, results): + """Convert a JSON result to an entity type based on the feature. 
+ + :rtype: list + :returns: List containing any of + :class:`~google.cloud.vision.entity.EntityAnnotation`, + :class:`~google.cloud.vision.face.Face` + + or one of + + :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation`, + :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`. + """ + detected_objects = [] + if feature_type == _FACE_ANNOTATIONS: + detected_objects.extend( + Face.from_api_repr(face) for face in results) + elif feature_type == _IMAGE_PROPERTIES_ANNOTATION: + return ImagePropertiesAnnotation.from_api_repr(results) + elif feature_type == _SAFE_SEARCH_ANNOTATION: + return SafeSearchAnnotation.from_api_repr(results) + elif feature_type == _WEB_DETECTION: + return WebDetection.from_api_repr(results) + elif feature_type == _CROP_HINTS_ANNOTATION: + crop_hints = results.get('cropHints', []) + detected_objects.extend( + CropHint.from_api_repr(result) for result in crop_hints) + elif feature_type == _FULL_TEXT_ANNOTATION: + return TextAnnotation.from_api_repr(results) + else: + for result in results: + detected_objects.append(EntityAnnotation.from_api_repr(result)) + return detected_objects diff --git a/vision/google/cloud/vision/batch.py b/vision/google/cloud/vision/batch.py new file mode 100644 index 000000000000..1bc0119aeb3a --- /dev/null +++ b/vision/google/cloud/vision/batch.py @@ -0,0 +1,57 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Batch multiple images into one request.""" + + +class Batch(object): + """Batch of images to process. + + :type client: :class:`~google.cloud.vision.client.Client` + :param client: Vision client. + """ + def __init__(self, client): + self._client = client + self._images = [] + + def add_image(self, image, features): + """Add image to batch request. + + :type image: :class:`~google.cloud.vision.image.Image` + :param image: Istance of ``Image``. + + :type features: list + :param features: List of :class:`~google.cloud.vision.feature.Feature`. + """ + self._images.append((image, features)) + + @property + def images(self): + """List of images to process. + + :rtype: list + :returns: List of :class:`~google.cloud.vision.image.Image`. + """ + return self._images + + def detect(self): + """Perform batch detection of images. + + :rtype: list + :returns: List of + :class:`~google.cloud.vision.annotations.Annotations`. + """ + results = self._client._vision_api.annotate(self.images) + self._images = [] + return results diff --git a/vision/google/cloud/vision/client.py b/vision/google/cloud/vision/client.py new file mode 100644 index 000000000000..bddc8a2e2984 --- /dev/null +++ b/vision/google/cloud/vision/client.py @@ -0,0 +1,117 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Client for interacting with the Google Cloud Vision API.""" + +import os + +from google.cloud.client import ClientWithProject +from google.cloud.environment_vars import DISABLE_GRPC + +from google.cloud.vision._gax import _GAPICVisionAPI +from google.cloud.vision._http import _HTTPVisionAPI +from google.cloud.vision.batch import Batch +from google.cloud.vision.image import Image + + +_USE_GRPC = not os.getenv(DISABLE_GRPC, False) + + +class Client(ClientWithProject): + """Client to bundle configuration needed for API requests. + + :type project: str + :param project: the project which the client acts on behalf of. + If not passed, falls back to the default inferred + from the environment. + + :type credentials: :class:`~google.auth.credentials.Credentials` + :param credentials: (Optional) The OAuth2 Credentials to use for this + client. If not passed (and if no ``_http`` object is + passed), falls back to the default inferred from the + environment. + + :type _http: :class:`~httplib2.Http` + :param _http: (Optional) HTTP object to make requests. Can be any object + that defines ``request()`` with the same interface as + :meth:`~httplib2.Http.request`. If not passed, an + ``_http`` object is created that is bound to the + ``credentials`` for the current object. + This parameter should be considered private, and could + change in the future. + + :type _use_grpc: bool + :param _use_grpc: (Optional) Explicitly specifies whether + to use the gRPC transport (via GAX) or HTTP. If unset, + falls back to the ``GOOGLE_CLOUD_DISABLE_GRPC`` + environment variable. + This parameter should be considered private, and could + change in the future. 
+ """ + + SCOPE = ('https://www.googleapis.com/auth/cloud-platform',) + """The scopes required for authenticating as a Cloud Vision consumer.""" + + _vision_api_internal = None + + def __init__(self, project=None, credentials=None, _http=None, + _use_grpc=None): + super(Client, self).__init__( + project=project, credentials=credentials, _http=_http) + if _use_grpc is None: + self._use_grpc = _USE_GRPC + else: + self._use_grpc = _use_grpc + + def batch(self): + """Batch multiple images into a single API request. + + :rtype: :class:`google.cloud.vision.batch.Batch` + :returns: Instance of ``Batch``. + """ + return Batch(self) + + def image(self, content=None, filename=None, source_uri=None): + """Get instance of Image using current client. + + :type content: bytes + :param content: Byte stream of an image. + + :type filename: str + :param filename: Filename to image. + + :type source_uri: str + :param source_uri: URL or Google Cloud Storage URI of image. + + :rtype: :class:`~google.cloud.vision.image.Image` + :returns: Image instance with the current client attached. + """ + return Image(client=self, content=content, filename=filename, + source_uri=source_uri) + + @property + def _vision_api(self): + """Proxy method that handles which transport call Vision Annotate. + + :rtype: :class:`~google.cloud.vision._http._HTTPVisionAPI` + or :class:`~google.cloud.vision._gax._GAPICVisionAPI` + :returns: Instance of ``_HTTPVisionAPI`` or ``_GAPICVisionAPI`` used to + make requests. + """ + if self._vision_api_internal is None: + if self._use_grpc: + self._vision_api_internal = _GAPICVisionAPI(self) + else: + self._vision_api_internal = _HTTPVisionAPI(self) + return self._vision_api_internal diff --git a/vision/google/cloud/vision/color.py b/vision/google/cloud/vision/color.py new file mode 100644 index 000000000000..205b8f3b1ba6 --- /dev/null +++ b/vision/google/cloud/vision/color.py @@ -0,0 +1,236 @@ +# Copyright 2016 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Image properties class representation derived from Vision API response.""" + + +class ImagePropertiesAnnotation(object): + """Representation of image properties + + :type colors: list + :param colors: List of + :class:`~google.cloud.vision.color.ColorInformation`. + """ + def __init__(self, colors): + self._colors = colors + + @classmethod + def from_api_repr(cls, image_properties): + """Factory: construct ``ImagePropertiesAnnotation`` from a response. + + :type image_properties: dict + :param image_properties: Dictionary response from Vision API with image + properties data. + + :rtype: list of + :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`. + :returns: List of ``ImagePropertiesAnnotation``. + """ + colors = image_properties.get('dominantColors', {}).get('colors', ()) + return cls([ColorInformation.from_api_repr(color) + for color in colors]) + + @classmethod + def from_pb(cls, image_properties): + """Factory: construct ``ImagePropertiesAnnotation`` from a response. + + :type image_properties: :class:`~google.cloud.proto.vision.v1.\ + image_annotator_pb2.ImageProperties` + :param image_properties: Protobuf response from Vision API with image + properties data. + + :rtype: list of + :class:`~google.cloud.vision.color.ImagePropertiesAnnotation` + :returns: List of ``ImagePropertiesAnnotation``. 
+ """ + colors = getattr(image_properties.dominant_colors, 'colors', ()) + if len(colors) > 0: + return cls([ColorInformation.from_pb(color) for color in colors]) + + @property + def colors(self): + """Colors in an image. + + :rtype: list of :class:`~google.cloud.vision.color.ColorInformation` + :returns: Populated list of ``ColorInformation``. + """ + return self._colors + + +class Color(object): + """Representation of RGBA color information. + + :type red: float + :param red: The amount of red in the color as a value in the interval + [0.0, 255.0]. + + :type green: float + :param green: The amount of green in the color as a value in the interval + [0.0, 255.0]. + + :type blue: float + :param blue: The amount of blue in the color as a value in the interval + [0.0, 255.0]. + + :type alpha: float + :param alpha: The fraction of this color that should be applied to the + pixel. + """ + def __init__(self, red, green, blue, alpha): + self._red = red + self._green = green + self._blue = blue + self._alpha = alpha + + @classmethod + def from_api_repr(cls, response): + """Factory: construct a ``Color`` from a Vision API response. + + :type response: dict + :param response: Color from API Response. + + :rtype: :class:`~google.cloud.vision.color.Color` + :returns: Instance of :class:`~google.cloud.vision.color.Color`. + """ + red = float(response.get('red', 0.0)) + green = float(response.get('green', 0.0)) + blue = float(response.get('blue', 0.0)) + alpha = response.get('alpha', 0.0) + + return cls(red, green, blue, alpha) + + @classmethod + def from_pb(cls, color): + """Factory: construct a ``Color`` from a protobuf response. + + :type color: :module: `google.type.color_pb2` + :param color: ``Color`` from API Response. + + :rtype: :class:`~google.cloud.vision.color.Color` + :returns: Instance of :class:`~google.cloud.vision.color.Color`. + """ + return cls(color.red, color.green, color.blue, color.alpha.value) + + @property + def red(self): + """Red component of the color. 
+ + :rtype: int + :returns: Red RGB value. + """ + return self._red + + @property + def green(self): + """Green component of the color. + + :rtype: int + :returns: Green RGB value. + """ + return self._green + + @property + def blue(self): + """Blue component of the color. + + :rtype: int + :returns: Blue RGB value. + """ + return self._blue + + @property + def alpha(self): + """Alpha transparency level. + + :rtype: float + :returns: Alpha transparency level. + """ + return self._alpha + + +class ColorInformation(object): + """Representation of color information from API response. + + :type color: :class:`~google.cloud.vision.color.Color` + :param color: RGB components of the color. + + :type score: float + :param score: Image-specific score for this color. Value in range [0, 1]. + + :type pixel_fraction: float + :param pixel_fraction: Stores the fraction of pixels the color occupies in + the image. Value in range [0, 1]. + """ + def __init__(self, color, score, pixel_fraction): + self._color = color + self._score = score + self._pixel_fraction = pixel_fraction + + @classmethod + def from_api_repr(cls, color_information): + """Factory: construct ``ColorInformation`` for a color. + + :type color_information: dict + :param color_information: Color data with extra meta information. + + :rtype: :class:`~google.cloud.vision.color.ColorInformation` + :returns: Instance of ``ColorInformation``. + """ + color = Color.from_api_repr(color_information.get('color', {})) + score = color_information.get('score') + pixel_fraction = color_information.get('pixelFraction') + return cls(color, score, pixel_fraction) + + @classmethod + def from_pb(cls, color_information): + """Factory: construct ``ColorInformation`` for a color. + + :type color_information: :class:`~google.cloud.proto.vision.v1.\ + image_annotator_pb2.ColorInfo` + :param color_information: Color data with extra meta information. 
+ + :rtype: :class:`~google.cloud.vision.color.ColorInformation` + :returns: Instance of ``ColorInformation``. + """ + color = Color.from_pb(color_information.color) + score = color_information.score + pixel_fraction = color_information.pixel_fraction + return cls(color, score, pixel_fraction) + + @property + def color(self): + """RGB components of the color. + + :rtype: :class:`~google.vision.color.Color` + :returns: Instance of ``Color``. + """ + return self._color + + @property + def score(self): + """Image-specific score for this color. Value in range [0, 1]. + + :rtype: float + :returns: Image score for this color. + """ + return self._score + + @property + def pixel_fraction(self): + """Stores the fraction of pixels the color occupies in the image. + + :rtype: float + :returns: Pixel fraction value in range [0, 1]. + """ + return self._pixel_fraction diff --git a/vision/google/cloud/vision/crop_hint.py b/vision/google/cloud/vision/crop_hint.py new file mode 100644 index 000000000000..4d04fbb9b075 --- /dev/null +++ b/vision/google/cloud/vision/crop_hint.py @@ -0,0 +1,92 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Representation of Vision API's crop hints.""" + +from google.cloud.vision.geometry import Bounds + + +class CropHint(object): + """Representation of a crop hint returned from the Vision API. + + :type bounds: dict + :param bounds: Dictionary of boundary information of detected entity. 
+ + :type confidence: float + :param confidence: Confidence of this being a salient region. + + :type importance_fraction: float + :param importance_fraction: Fraction of importance of this region. + """ + def __init__(self, bounds, confidence, importance_fraction): + self._bounds = bounds + self._confidence = confidence + self._importance_fraction = importance_fraction + + @classmethod + def from_api_repr(cls, response): + """Factory: construct ``CropHint`` from Vision API response. + + :type response: dict + :param response: Dictionary response from Vision API with entity data. + + :rtype: :class:`~google.cloud.vision.crop_hint.CropHint` + :returns: Instance of ``CropHint``. + """ + bounds = Bounds.from_api_repr(response.get('boundingPoly')) + confidence = response.get('confidence', 0.0) + importance_fraction = response.get('importanceFraction', 0.0) + return cls(bounds, confidence, importance_fraction) + + @classmethod + def from_pb(cls, response): + """Factory: construct ``CropHint`` from Vision gRPC response. + + :type response: :class:`google.cloud.proto.vision.v1.\ + image_annotator_pb2.CropHint` + :param response: gRPC response from Vision API with entity data. + + :rtype: :class:`~google.cloud.vision.crop_hint.CropHint` + :returns: Instance of ``CropHint``. + """ + bounds = Bounds.from_pb(response.bounding_poly) + return cls(bounds, response.confidence, response.importance_fraction) + + @property + def bounds(self): + """Bounding polygon of crop hints. + + :rtype: :class:`~google.cloud.vision.geometry.Bounds` + :returns: Instance of ``Bounds`` with populated vertices. + """ + return self._bounds + + @property + def confidence(self): + """Confidence of this being a salient region. Range [0, 1]. + + :rtype: float + :returns: float between 0 and 1, inclusive. + """ + return self._confidence + + @property + def importance_fraction(self): + """Fraction of importance of this salient region with respect to the + original image. 
+ + :rtype: float + :returns: float + """ + return self._importance_fraction diff --git a/vision/google/cloud/vision/entity.py b/vision/google/cloud/vision/entity.py new file mode 100644 index 000000000000..5d1e402b362a --- /dev/null +++ b/vision/google/cloud/vision/entity.py @@ -0,0 +1,146 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Entity class for holding information returned from annotating an image.""" + + +from google.cloud.vision.geometry import Bounds +from google.cloud.vision.geometry import LocationInformation + + +class EntityAnnotation(object): + """Representation of an entity returned from the Vision API. + + :type bounds: dict + :param bounds: Dictionary of bounary information of detected entity. + + :type description: str + :param description: Description of entity detected in an image. + + :type locale: str + :param locale: The language code for the locale in which the entity textual + description (next field) is expressed. + + :type locations: list of + :class:`~google.cloud.vision.geometry.LocationInformation`. + :param locations: List of ``LocationInformation`` instances. + + :type mid: str + :param mid: Opaque entity ID. + + :type score: float + :param score: Overall score of the result. Range [0, 1]. 
+ """ + def __init__(self, bounds, description, locale, locations, mid, score): + self._bounds = bounds + self._description = description + self._locale = locale + self._locations = locations + self._mid = mid + self._score = score + + @classmethod + def from_api_repr(cls, response): + """Factory: construct entity from Vision API response. + + :type response: dict + :param response: Dictionary response from Vision API with entity data. + + :rtype: :class:`~google.cloud.vision.entity.EntityAnnotation` + :returns: Instance of ``EntityAnnotation``. + """ + bounds = Bounds.from_api_repr(response.get('boundingPoly')) + description = response['description'] + locale = response.get('locale', None) + locations = [LocationInformation.from_api_repr(location) + for location in response.get('locations', ())] + mid = response.get('mid', None) + score = response.get('score', None) + + return cls(bounds, description, locale, locations, mid, score) + + @classmethod + def from_pb(cls, response): + """Factory: construct entity from Vision gRPC response. + + :type response: :class:`~google.cloud.proto.vision.v1.\ + image_annotator_pb2.AnnotateImageResponse` + :param response: gRPC response from Vision API with entity data. + + :rtype: :class:`~google.cloud.vision.entity.EntityAnnotation` + :returns: Instance of ``EntityAnnotation``. + """ + bounds = Bounds.from_pb(response.bounding_poly) + description = response.description + locale = response.locale + locations = [LocationInformation.from_pb(location) + for location in response.locations] + mid = response.mid + score = response.score + return cls(bounds, description, locale, locations, mid, score) + + @property + def bounds(self): + """Bounding polygon of detected image feature. + + :rtype: :class:`~google.cloud.vision.geometry.Bounds` + :returns: Instance of ``Bounds`` with populated vertices. + """ + return self._bounds + + @property + def description(self): + """Description of feature detected in image. 
+ + :rtype: str + :returns: String description of feature detected in image. + """ + return self._description + + @property + def locale(self): + """The language code for text discovered in an image. + + :rtype: str + :returns: String language code of text found in the image. + """ + return self._locale + + @property + def locations(self): + """Location coordinates landmarks detected. + + :rtype: :class:`~google.cloud.vision.geometry.LocationInformation` + :returns: ``LocationInformation`` populated with latitude and longitude + of object detected in an image. + """ + return self._locations + + @property + def mid(self): + """MID of feature detected in image. + + :rtype: str + :returns: String MID of feature detected in image. + """ + return self._mid + + @property + def score(self): + """Overall score of the result. Range [0, 1]. + + :rtype: float + :returns: Overall score of the result. Range [0, 1]. + """ + return self._score diff --git a/vision/google/cloud/vision/face.py b/vision/google/cloud/vision/face.py new file mode 100644 index 000000000000..36f4ed54b4ca --- /dev/null +++ b/vision/google/cloud/vision/face.py @@ -0,0 +1,575 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Face class representing the Vision API's face detection response.""" + + +from enum import Enum + +from google.cloud.vision.geometry import BoundsBase +from google.cloud.vision.likelihood import _get_pb_likelihood +from google.cloud.vision.likelihood import Likelihood +from google.cloud.vision.geometry import Position + + +class Angles(object): + """Angles representing the positions of a face.""" + def __init__(self, roll, pan, tilt): + self._roll = roll + self._pan = pan + self._tilt = tilt + + @classmethod + def from_api_repr(cls, angle): + """Factory: construct the angles from an Vision API response. + + :type angle: dict + :param angle: Dictionary representation of an angle. + + :rtype: :class:`~google.cloud.vision.face.Angles` + :returns: An `Angles` instance with data parsed from `response`. + """ + roll = angle['rollAngle'] + pan = angle['panAngle'] + tilt = angle['tiltAngle'] + + return cls(roll, pan, tilt) + + @classmethod + def from_pb(cls, angle): + """Factory: convert protobuf Angle object to local Angle object. + + :type angle: :class:`~google.cloud.proto.vision.v1.\ + image_annotator_pb2.FaceAnnotation` + :param angle: Protobuf ``FaceAnnotation`` response with angle data. + + :rtype: :class:`~google.cloud.vision.face.Angles` + :returns: Instance of ``Angles``. + """ + roll = angle.roll_angle + pan = angle.pan_angle + tilt = angle.tilt_angle + + return cls(roll, pan, tilt) + + @property + def roll(self): + """Roll angle of face. + + :rtype: float + :returns: Roll angle of face in degrees. + """ + return self._roll + + @property + def pan(self): + """Pan angle of face. + + :rtype: float + :returns: Pan angle of face in degrees. + """ + return self._pan + + @property + def tilt(self): + """Tilt angle of face. + + :rtype: float + :returns: Tilt angle of face in degrees. 
+ """ + return self._tilt + + +class Bounds(BoundsBase): + """The bounding polygon of the entire face.""" + + +class Emotions(object): + """Emotions displayed by the face detected in an image.""" + def __init__(self, joy_likelihood, sorrow_likelihood, + surprise_likelihood, anger_likelihood): + self._joy_likelihood = joy_likelihood + self._sorrow_likelihood = sorrow_likelihood + self._surprise_likelihood = surprise_likelihood + self._anger_likelihood = anger_likelihood + + @classmethod + def from_api_repr(cls, emotions): + """Factory: construct ``Emotions`` from Vision API response. + + :type emotions: dict + :param emotions: Response dictionary representing a face. + + :rtype: :class:`~google.cloud.vision.face.Emotions` + :returns: Populated instance of ``Emotions``. + """ + joy_likelihood = Likelihood[emotions['joyLikelihood']] + sorrow_likelihood = Likelihood[emotions['sorrowLikelihood']] + surprise_likelihood = Likelihood[emotions['surpriseLikelihood']] + anger_likelihood = Likelihood[emotions['angerLikelihood']] + + return cls(joy_likelihood, sorrow_likelihood, surprise_likelihood, + anger_likelihood) + + @classmethod + def from_pb(cls, emotions): + """Factory: construct ``Emotions`` from Vision API response. + + :type emotions: :class:`~google.cloud.proto.vision.v1.\ + image_annotator_pb2.FaceAnnotation` + :param emotions: Response dictionary representing a face with emotions. + + :rtype: :class:`~google.cloud.vision.face.Emotions` + :returns: Populated instance of ``Emotions``. + """ + joy_likelihood = _get_pb_likelihood(emotions.joy_likelihood) + sorrow_likelihood = _get_pb_likelihood(emotions.sorrow_likelihood) + surprise_likelihood = _get_pb_likelihood(emotions.surprise_likelihood) + anger_likelihood = _get_pb_likelihood(emotions.anger_likelihood) + + return cls(joy_likelihood, sorrow_likelihood, surprise_likelihood, + anger_likelihood) + + @property + def anger(self): + """Likelihood of anger in detected face. 
+ + :rtype: str + :returns: String derived from + :class:`~google.cloud.vision.face.Likelihood`. + """ + return self._anger_likelihood + + @property + def joy(self): + """Likelihood of joy in detected face. + + :rtype: str + :returns: String derived from + :class:`~google.cloud.vision.face.Likelihood`. + """ + return self._joy_likelihood + + @property + def sorrow(self): + """Likelihood of sorrow in detected face. + + :rtype: str + :returns: String derived from + :class:`~google.cloud.vision.face.Likelihood`. + """ + return self._sorrow_likelihood + + @property + def surprise(self): + """Likelihood of surprise in detected face. + + :rtype: str + :returns: String derived from + :class:`~google.cloud.vision.face.Likelihood`. + """ + return self._surprise_likelihood + + +class Face(object): + """Representation of a face found by the Vision API""" + + def __init__(self, angles, bounds, detection_confidence, emotions, + fd_bounds, headwear_likelihood, image_properties, landmarks, + landmarking_confidence): + self._angles = angles + self._bounds = bounds + self._detection_confidence = detection_confidence + self._emotions = emotions + self._fd_bounds = fd_bounds + self._headwear_likelihood = headwear_likelihood + self._landmarks = landmarks + self._landmarking_confidence = landmarking_confidence + self._image_properties = image_properties + + @classmethod + def from_api_repr(cls, face): + """Factory: construct an instance of a Face from an API response + + :type face: dict + :param face: Face annotation dict returned from the Vision API. + + :rtype: :class:`~google.cloud.vision.face.Face` + :returns: A instance of `Face` with data parsed from `response`. 
+ """ + face_data = { + 'angles': Angles.from_api_repr(face), + 'bounds': Bounds.from_api_repr(face['boundingPoly']), + 'detection_confidence': face['detectionConfidence'], + 'emotions': Emotions.from_api_repr(face), + 'fd_bounds': FDBounds.from_api_repr(face['fdBoundingPoly']), + 'headwear_likelihood': Likelihood[face['headwearLikelihood']], + 'image_properties': FaceImageProperties.from_api_repr(face), + 'landmarks': Landmarks.from_api_repr(face['landmarks']), + 'landmarking_confidence': face['landmarkingConfidence'], + } + return cls(**face_data) + + @classmethod + def from_pb(cls, face): + """Factory: construct an instance of a Face from an protobuf response + + :type face: :class:`~google.cloud.proto.vision.v1.\ + image_annotator_pb2.AnnotateImageResponse` + :param face: ``AnnotateImageResponse`` from gRPC call. + + :rtype: :class:`~google.cloud.vision.face.Face` + :returns: A instance of `Face` with data parsed from ``response``. + """ + face_data = { + 'angles': Angles.from_pb(face), + 'bounds': Bounds.from_pb(face.bounding_poly), + 'detection_confidence': face.detection_confidence, + 'emotions': Emotions.from_pb(face), + 'fd_bounds': FDBounds.from_pb(face.fd_bounding_poly), + 'headwear_likelihood': _get_pb_likelihood( + face.headwear_likelihood), + 'image_properties': FaceImageProperties.from_pb(face), + 'landmarks': Landmarks.from_pb(face.landmarks), + 'landmarking_confidence': face.landmarking_confidence, + } + return cls(**face_data) + + @property + def anger(self): + """Accessor to likelihood that the detected face is angry. + + :rtype: str + :returns: String derived from + :class:`~google.cloud.vision.face.Likelihood`. + """ + return self.emotions.anger + + @property + def angles(self): + """Accessor to the pan, tilt and roll angles of a Face. + + :rtype: :class:`~google.cloud.vision.face.Angles` + :returns: Pan, tilt and roll angles of the detected face. 
+ """ + + return self._angles + + @property + def bounds(self): + """Accessor to the bounding poly information of the detected face. + + :rtype: :class:`~google.cloud.vision.face.Bounds` + :returns: An instance of ``Bounds`` which has a list of vertices. + """ + return self._bounds + + @property + def detection_confidence(self): + """Face detection confidence score determined by the Vision API. + + :rtype: float + :returns: Float representation of confidence ranging from 0 to 1. + """ + return self._detection_confidence + + @property + def emotions(self): + """Accessor to the possible emotions expressed in the detected face. + + :rtype: :class:`~google.cloud.vision.face.Emotions` + :returns: An instance of ``Emotions`` with joy, sorrow, anger, surprise + likelihood. + """ + return self._emotions + + @property + def fd_bounds(self): + """Accessor to the skin area bounding poly of the detected face. + + :rtype: :class:`~google.cloud.vision.image.FDBounds` + :returns: An instance of ``FDBounds`` which has a list of vertices. + """ + return self._fd_bounds + + @property + def headwear(self): + """Headwear likelihood. + + :rtype: :class:`~google.cloud.vision.face.Likelihood` + :returns: String representing the likelihood based on + :class:`~google.cloud.vision.face.Likelihood` + """ + return self._headwear_likelihood + + @property + def image_properties(self): + """Image properties from imaged used in face detection. + + :rtype: :class:`~google.cloud.vision.face.FaceImageProperties` + :returns: ``FaceImageProperties`` object with image properties. + """ + return self._image_properties + + @property + def joy(self): + """Likelihood of joy in detected face. + + :rtype: str + :returns: String derived from + :class:`~google.cloud.vision.face.Likelihood`. + """ + return self.emotions.joy + + @property + def landmarks(self): + """Accessor to the facial landmarks detected in a face. 
+ + :rtype: :class:`~google.cloud.vision.face.Landmarks` + :returns: ``Landmarks`` object with facial landmarks as properies. + """ + return self._landmarks + + @property + def landmarking_confidence(self): + """Landmarking confidence score determinged by the Vision API. + + :rtype: float + :returns: Float representing the confidence of the Vision API in + determining the landmarks on a face. + """ + return self._landmarking_confidence + + @property + def sorrow(self): + """Likelihood of sorrow in detected face. + + :rtype: str + :returns: String derived from + :class:`~google.cloud.vision.face.Likelihood`. + """ + return self.emotions.sorrow + + @property + def surprise(self): + """Likelihood of surprise in detected face. + + :rtype: str + :returns: String derived from + :class:`~google.cloud.vision.face.Likelihood`. + """ + return self.emotions.surprise + + +class FaceImageProperties(object): + """A representation of the image properties from face detection.""" + def __init__(self, blurred_likelihood, underexposed_likelihood): + self._blurred_likelihood = blurred_likelihood + self._underexposed_likelihood = underexposed_likelihood + + @classmethod + def from_api_repr(cls, face): + """Factory: construct image properties from image. + + :type face: dict + :param face: Dictionary representation of a ``Face``. + + :rtype: :class:`~google.cloud.vision.face.FaceImageProperties` + :returns: Instance populated with image property data. + """ + blurred = Likelihood[face['blurredLikelihood']] + underexposed = Likelihood[face['underExposedLikelihood']] + + return cls(blurred, underexposed) + + @classmethod + def from_pb(cls, face): + """Factory: construct image properties from image. + + :type face: :class:`~google.cloud.proto.vision.v1.image_annotator_pb2.\ + FaceAnnotation` + :param face: Protobuf instace of `Face`. + + :rtype: :class:`~google.cloud.vision.face.FaceImageProperties` + :returns: Instance populated with image property data. 
+ """ + blurred = _get_pb_likelihood(face.blurred_likelihood) + underexposed = _get_pb_likelihood(face.under_exposed_likelihood) + + return cls(blurred, underexposed) + + @property + def blurred(self): + """Likelihood of the image being blurred. + + :rtype: str + :returns: String representation derived from + :class:`~google.cloud.vision.face.Position`. + """ + return self._blurred_likelihood + + @property + def underexposed(self): + """Likelihood that the image used for detection was underexposed. + + :rtype: str + :returns: String representation derived from + :class:`~google.cloud.vision.face.Position`. + """ + return self._underexposed_likelihood + + +class LandmarkTypes(Enum): + """A representation of the face detection landmark types. + + See: + https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#type_1 + """ + UNKNOWN_LANDMARK = 0 + LEFT_EYE = 1 + RIGHT_EYE = 2 + LEFT_OF_LEFT_EYEBROW = 3 + RIGHT_OF_LEFT_EYEBROW = 4 + LEFT_OF_RIGHT_EYEBROW = 5 + RIGHT_OF_RIGHT_EYEBROW = 6 + MIDPOINT_BETWEEN_EYES = 7 + NOSE_TIP = 8 + UPPER_LIP = 9 + LOWER_LIP = 10 + MOUTH_LEFT = 11 + MOUTH_RIGHT = 12 + MOUTH_CENTER = 13 + NOSE_BOTTOM_RIGHT = 14 + NOSE_BOTTOM_LEFT = 15 + NOSE_BOTTOM_CENTER = 16 + LEFT_EYE_TOP_BOUNDARY = 17 + LEFT_EYE_RIGHT_CORNER = 18 + LEFT_EYE_BOTTOM_BOUNDARY = 19 + LEFT_EYE_LEFT_CORNER = 20 + RIGHT_EYE_TOP_BOUNDARY = 21 + RIGHT_EYE_RIGHT_CORNER = 22 + RIGHT_EYE_BOTTOM_BOUNDARY = 23 + RIGHT_EYE_LEFT_CORNER = 24 + LEFT_EYEBROW_UPPER_MIDPOINT = 25 + RIGHT_EYEBROW_UPPER_MIDPOINT = 26 + LEFT_EAR_TRAGION = 27 + RIGHT_EAR_TRAGION = 28 + LEFT_EYE_PUPIL = 29 + RIGHT_EYE_PUPIL = 30 + FOREHEAD_GLABELLA = 31 + CHIN_GNATHION = 32 + CHIN_LEFT_GONION = 33 + CHIN_RIGHT_GONION = 34 + + +class FDBounds(BoundsBase): + """The bounding polygon of just the skin portion of the face.""" + + +class Landmark(object): + """A face-specific landmark (for example, a face feature, left eye). 
+ + :type landmark_type: :class:`~google.cloud.vision.face.LandmarkTypes` + :param landmark_type: Instance of ``LandmarkTypes``. + + :type position: :class:`~google.cloud.vision.face.Position` + :param position: + """ + def __init__(self, position, landmark_type): + self._position = position + self._landmark_type = landmark_type + + @classmethod + def from_api_repr(cls, landmark): + """Factory: construct an instance of a Landmark from a response. + + :type landmark: dict + :param landmark: Landmark representation from Vision API. + + :rtype: :class:`~google.cloud.vision.face.Landmark` + :returns: Populated instance of ``Landmark``. + """ + position = Position.from_api_repr(landmark['position']) + landmark_type = LandmarkTypes[landmark['type']] + return cls(position, landmark_type) + + @classmethod + def from_pb(cls, landmark): + """Factory: construct an instance of a Landmark from a response. + + :type landmark: :class:`~google.cloud.proto.vision.v1.\ + image_annotator_pb.FaceAnnotation.Landmark` + :param landmark: Landmark representation from Vision API. + + :rtype: :class:`~google.cloud.vision.face.Landmark` + :returns: Populated instance of ``Landmark``. + """ + position = Position.from_pb(landmark.position) + landmark_type = LandmarkTypes(landmark.type) + return cls(position, landmark_type) + + @property + def position(self): + """Landmark position on face. + + :rtype: :class:`~google.cloud.vision.face.Position` + :returns: Instance of `Position` with landmark coordinates. + """ + return self._position + + @property + def landmark_type(self): + """Landmark type of facial feature. + + :rtype: str + :returns: String representation of facial landmark type. + """ + return self._landmark_type + + +class Landmarks(object): + """Landmarks detected on a face represented as properties. + + :type landmarks: list + :param landmarks: List of :class:`~google.cloud.vision.face.Landmark`. 
+ """ + def __init__(self, landmarks): + for landmark in landmarks: + setattr(self, landmark.landmark_type.name.lower(), landmark) + + @classmethod + def from_api_repr(cls, landmarks): + """Factory: construct facial landmarks from Vision API response. + + :type landmarks: dict + :param landmarks: JSON face annotation. + + :rtype: :class:`~google.cloud.vision.face.Landmarks` + :returns: Instance of ``Landmarks`` populated with facial landmarks. + """ + return cls([Landmark.from_api_repr(landmark) + for landmark in landmarks]) + + @classmethod + def from_pb(cls, landmarks): + """Factory: construct facial landmarks from Vision gRPC response. + + :type landmarks: :class:`~google.protobuf.internal.containers.\ + RepeatedCompositeFieldContainer` + :param landmarks: List of facial landmarks. + + :rtype: :class:`~google.cloud.vision.face.Landmarks` + :returns: Instance of ``Landmarks`` populated with facial landmarks. + """ + return cls([Landmark.from_pb(landmark) for landmark in landmarks]) diff --git a/vision/google/cloud/vision/feature.py b/vision/google/cloud/vision/feature.py new file mode 100644 index 000000000000..2a2b5b2d6ef7 --- /dev/null +++ b/vision/google/cloud/vision/feature.py @@ -0,0 +1,86 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Feature represenging various types of annotating.""" + + +class FeatureTypes(object): + """Feature Types to indication which annotations to perform. 
+ + See: + https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#Type + """ + CROP_HINTS = 'CROP_HINTS' + DOCUMENT_TEXT_DETECTION = 'DOCUMENT_TEXT_DETECTION' + FACE_DETECTION = 'FACE_DETECTION' + IMAGE_PROPERTIES = 'IMAGE_PROPERTIES' + LABEL_DETECTION = 'LABEL_DETECTION' + LANDMARK_DETECTION = 'LANDMARK_DETECTION' + LOGO_DETECTION = 'LOGO_DETECTION' + SAFE_SEARCH_DETECTION = 'SAFE_SEARCH_DETECTION' + TEXT_DETECTION = 'TEXT_DETECTION' + WEB_DETECTION = 'WEB_DETECTION' + + +class Feature(object): + """Feature object specifying the annotation type and maximum results. + + :type feature_type: str + :param feature_type: String representation of + :class:`~google.cloud.vision.feature.FeatureType`. + + :type max_results: int + :param max_results: Number of results to return for the specified + feature type. + + See: + https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#Feature + """ + def __init__(self, feature_type, max_results=1): + try: + self._feature_type = getattr(FeatureTypes, feature_type) + except AttributeError: + raise AttributeError('Feature type passed in cannot be found.') + self._max_results = int(max_results) + + def as_dict(self): + """Generate dictionary for Feature request format. + + :rtype: dict + :returns: Dictionary representation of a + :class:`~google.cloud.vision.feature.FeatureType`. + """ + return { + 'type': self.feature_type, + 'maxResults': self.max_results + } + + @property + def feature_type(self): + """"Feature type string. + + :rtype: :class:`~google.cloud.vision.feature.FeatureTypes` + :returns: Instance of + :class:`~google.cloud.vision.feature.FeatureTypes` + """ + return self._feature_type + + @property + def max_results(self): + """Maximum number of results for feature type. + + :rtype: int + :returns: Maxium results to be returned. 
+ """ + return self._max_results diff --git a/vision/google/cloud/vision/geometry.py b/vision/google/cloud/vision/geometry.py new file mode 100644 index 000000000000..9779282ef240 --- /dev/null +++ b/vision/google/cloud/vision/geometry.py @@ -0,0 +1,244 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Geometry and other generic classes used by the Vision API.""" + + +class BoundsBase(object): + """Base class for handling bounds with vertices. + + :type vertices: list of :class:`~google.cloud.vision.geometry.Vertex` + :param vertices: List of vertcies describing points on an image. + """ + def __init__(self, vertices): + self._vertices = vertices + + @classmethod + def from_api_repr(cls, vertices): + """Factory: construct BoundsBase instance from Vision API response. + + :type vertices: dict + :param vertices: List of vertices. + + :rtype: :class:`~google.cloud.vision.geometry.BoundsBase` or None + :returns: Instance of ``BoundsBase`` with populated verticies or None. + """ + if vertices is None: + return None + return cls([Vertex(vertex.get('x', None), vertex.get('y', None)) + for vertex in vertices.get('vertices', ())]) + + @classmethod + def from_pb(cls, vertices): + """Factory: construct BoundsBase instance from a protobuf response. + + :type vertices: :class:`~google.cloud.proto.vision.v1.\ + geometry_pb2.BoundingPoly` + :param vertices: List of vertices. 
+ + :rtype: :class:`~google.cloud.vision.geometry.BoundsBase` or None + :returns: Instance of ``BoundsBase`` with populated verticies. + """ + return cls([Vertex(vertex.x, vertex.y) + for vertex in vertices.vertices]) + + @property + def vertices(self): + """List of vertices. + + :rtype: list of :class:`~google.cloud.vision.geometry.Vertex` + :returns: List of populated vertices. + """ + return self._vertices + + +class Bounds(BoundsBase): + """A polygon boundry of the detected feature.""" + + +class FDBounds(BoundsBase): + """The bounding polygon of just the skin portion of the face.""" + + +class LocationInformation(object): + """Representation of location information returned by the Vision API. + + :type latitude: float + :param latitude: Latitude coordinate of geographical location. + + :type longitude: float + :param longitude: Longitude coordinate of geographical location. + """ + def __init__(self, latitude, longitude): + self._latitude = latitude + self._longitude = longitude + + @classmethod + def from_api_repr(cls, location_info): + """Factory: construct location information from Vision API response. + + :type location_info: dict + :param location_info: Dictionary response of locations. + + :rtype: :class:`~google.cloud.vision.geometry.LocationInformation` + :returns: ``LocationInformation`` with populated latitude and + longitude. + """ + lat_long = location_info.get('latLng', {}) + latitude = lat_long.get('latitude') + longitude = lat_long.get('longitude') + return cls(latitude, longitude) + + @classmethod + def from_pb(cls, location_info): + """Factory: construct location information from a protobuf response. + + :type location_info: :class:`~google.cloud.vision.v1.LocationInfo` + :param location_info: Protobuf response with ``LocationInfo``. + + :rtype: :class:`~google.cloud.vision.geometry.LocationInformation` + :returns: ``LocationInformation`` with populated latitude and + longitude. 
+ """ + return cls(location_info.lat_lng.latitude, + location_info.lat_lng.longitude) + + @property + def latitude(self): + """Latitude coordinate. + + :rtype: float + :returns: Latitude coordinate of location. + """ + return self._latitude + + @property + def longitude(self): + """Longitude coordinate. + + :rtype: float + :returns: Longitude coordinate of location. + """ + return self._longitude + + +class Position(object): + """A 3D position in the image. + + See: + https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#Position + + :type x_coordinate: float + :param x_coordinate: X position coordinate. + + :type y_coordinate: float + :param y_coordinate: Y position coordinate. + + :type z_coordinate: float + :param z_coordinate: Z position coordinate. + """ + def __init__(self, x_coordinate=None, y_coordinate=None, + z_coordinate=None): + self._x_coordinate = x_coordinate + self._y_coordinate = y_coordinate + self._z_coordinate = z_coordinate + + @classmethod + def from_api_repr(cls, position): + """Factory: construct 3D position from API response. + + :type position: dict + :param position: Dictionary with 3 axis position data. + + :rtype: :class:`~google.cloud.vision.geometry.Position` + :returns: ``Position`` constructed with 3D points from API response. + """ + x_coordinate = position['x'] + y_coordinate = position['y'] + z_coordinate = position['z'] + return cls(x_coordinate, y_coordinate, z_coordinate) + + @classmethod + def from_pb(cls, response_position): + """Factory: construct 3D position from API response. + + :rtype: :class:`~google.cloud.vision.geometry.Position` + :returns: ``Position`` constructed with 3D points from API response. + """ + x_coordinate = response_position.x + y_coordinate = response_position.y + z_coordinate = response_position.z + return cls(x_coordinate, y_coordinate, z_coordinate) + + @property + def x_coordinate(self): + """X position coordinate. + + :rtype: float + :returns: X position coordinate. 
+ """ + return self._x_coordinate + + @property + def y_coordinate(self): + """Y position coordinate. + + :rtype: float + :returns: Y position coordinate. + """ + return self._y_coordinate + + @property + def z_coordinate(self): + """Z position coordinate. + + :rtype: float + :returns: Z position coordinate. + """ + return self._z_coordinate + + +class Vertex(object): + """A vertex represents a 2D point in the image. + + See: + https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#Vertex + + :type x_coordinate: float + :param x_coordinate: X position coordinate. + + :type y_coordinate: float + :param y_coordinate: Y position coordinate. + """ + def __init__(self, x_coordinate=None, y_coordinate=None): + self._x_coordinate = x_coordinate + self._y_coordinate = y_coordinate + + @property + def x_coordinate(self): + """X position coordinate. + + :rtype: float + :returns: X position coordinate. + """ + return self._x_coordinate + + @property + def y_coordinate(self): + """Y position coordinate. + + :rtype: float + :returns: Y position coordinate. + """ + return self._y_coordinate diff --git a/vision/google/cloud/vision/image.py b/vision/google/cloud/vision/image.py new file mode 100644 index 000000000000..f96103d6fcdd --- /dev/null +++ b/vision/google/cloud/vision/image.py @@ -0,0 +1,304 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Image represented by either a URI or byte stream.""" + + +from base64 import b64encode + +from google.cloud.proto.vision.v1 import image_annotator_pb2 + +from google.cloud.vision._gax import _to_gapic_image +from google.cloud._helpers import _to_bytes +from google.cloud._helpers import _bytes_to_unicode +from google.cloud.vision.feature import Feature +from google.cloud.vision.feature import FeatureTypes + + +class Image(object): + """Image representation containing information to be annotate. + + :type content: bytes + :param content: Byte stream of an image. + + :type filename: str + :param filename: Filename to image. + + :type source_uri: str + :param source_uri: URL or Google Cloud Storage URI of image. + + :type client: :class:`~google.cloud.vision.client.Client` + :param client: Instance of Vision client. + """ + + def __init__(self, client, content=None, filename=None, source_uri=None): + sources = [source for source in (content, filename, source_uri) + if source is not None] + if len(sources) != 1: + raise ValueError( + 'Specify exactly one of "content", "filename", or ' + '"source_uri".') + + self.client = client + + if filename is not None: + with open(filename, 'rb') as file_obj: + content = file_obj.read() + + if content is not None: + content = _to_bytes(content) + + self._content = content + self._source = source_uri + + def as_dict(self): + """Generate dictionary structure for request. + + :rtype: dict + :returns: Dictionary with source information for image. + """ + if self.content: + return { + 'content': _bytes_to_unicode(b64encode(self.content)) + } + elif self.source.startswith('gs://'): + return { + 'source': { + 'gcs_image_uri': self.source + } + } + elif self.source.startswith(('http://', 'https://')): + return { + 'source': { + 'image_uri': self.source + } + } + raise ValueError('No image content or source found.') + + @property + def content(self): + """Base64 encoded image content. 
+ + :rtype: str + :returns: Base64 encoded image bytes. + """ + return self._content + + @property + def source(self): + """Google Cloud Storage URI. + + :rtype: str + :returns: String of Google Cloud Storage URI. + """ + return self._source + + def _detect_annotation(self, images): + """Generic method for detecting annotations. + + :type images: list + :param images: List of :class:`~google.cloud.vision.image.Image`. + + :rtype: list + :returns: List of + :class:`~google.cloud.vision.annotations.Annotations`. + """ + return self.client._vision_api.annotate(images) + + def _detect_annotation_from_pb(self, requests_pb=None): + """Helper for pre-made requests. + + :type requests_pb: list + :param requests_pb: List of :class:`google.cloud.proto.vision.v1.\ + image_annotator_pb2.AnnotateImageRequest` + + :rtype: :class:`~google.cloud.vision.annotations.Annotations` + :returns: Instance of ``Annotations``. + """ + return self.client._vision_api.annotate(self, requests_pb=requests_pb) + + def detect(self, features): + """Detect multiple feature types. + + :type features: list of :class:`~google.cloud.vision.feature.Feature` + :param features: List of the ``Feature`` indication the type of + annotation to perform. + + :rtype: list + :returns: List of + :class:`~google.cloud.vision.entity.EntityAnnotation`. + """ + images = ((self, features),) + return self._detect_annotation(images) + + def detect_crop_hints(self, aspect_ratios=None, limit=10): + """Detect crop hints in image. + + :type aspect_ratios: list + :param aspect_ratios: (Optional) List of floats i.e. 4/3 == 1.33333. A + maximum of 16 aspect ratios can be given. + + :type limit: int + :param limit: (Optional) The number of crop hints to detect. + + :rtype: list + :returns: List of :class:`~google.cloud.vision.crop_hint.CropHints`. 
+ """ + feature_type = image_annotator_pb2.Feature.CROP_HINTS + feature = image_annotator_pb2.Feature(type=feature_type, + max_results=limit) + image = _to_gapic_image(self) + crop_hints_params = image_annotator_pb2.CropHintsParams( + aspect_ratios=aspect_ratios) + image_context = image_annotator_pb2.ImageContext( + crop_hints_params=crop_hints_params) + request = image_annotator_pb2.AnnotateImageRequest( + image=image, features=[feature], image_context=image_context) + + annotations = self._detect_annotation_from_pb([request]) + return annotations[0].crop_hints + + def detect_faces(self, limit=10): + """Detect faces in image. + + :type limit: int + :param limit: The number of faces to try and detect. + + :rtype: list + :returns: List of :class:`~google.cloud.vision.face.Face`. + """ + features = [Feature(FeatureTypes.FACE_DETECTION, limit)] + annotations = self.detect(features) + return annotations[0].faces + + def detect_full_text(self, language_hints=None, limit=10): + """Detect a full document's text. + + :type language_hints: list + :param language_hints: (Optional) A list of BCP-47 language codes. See: + https://cloud.google.com/vision/docs/languages + + :type limit: int + :param limit: (Optional) The number of documents to detect. + + :rtype: list + :returns: List of :class:`~google.cloud.vision.text.TextAnnotation`. + """ + feature_type = image_annotator_pb2.Feature.DOCUMENT_TEXT_DETECTION + feature = image_annotator_pb2.Feature(type=feature_type, + max_results=limit) + image = _to_gapic_image(self) + image_context = image_annotator_pb2.ImageContext( + language_hints=language_hints) + request = image_annotator_pb2.AnnotateImageRequest( + image=image, features=[feature], image_context=image_context) + annotations = self._detect_annotation_from_pb([request]) + return annotations[0].full_texts + + def detect_labels(self, limit=10): + """Detect labels that describe objects in an image. 
+ + :type limit: int + :param limit: The maximum number of labels to try and detect. + + :rtype: list + :returns: List of :class:`~google.cloud.vision.entity.EntityAnnotation` + """ + features = [Feature(FeatureTypes.LABEL_DETECTION, limit)] + annotations = self.detect(features) + return annotations[0].labels + + def detect_landmarks(self, limit=10): + """Detect landmarks in an image. + + :type limit: int + :param limit: The maximum number of landmarks to find. + + :rtype: list + :returns: List of + :class:`~google.cloud.vision.entity.EntityAnnotation`. + """ + features = [Feature(FeatureTypes.LANDMARK_DETECTION, limit)] + annotations = self.detect(features) + return annotations[0].landmarks + + def detect_logos(self, limit=10): + """Detect logos in an image. + + :type limit: int + :param limit: The maximum number of logos to find. + + :rtype: list + :returns: List of + :class:`~google.cloud.vision.entity.EntityAnnotation`. + """ + features = [Feature(FeatureTypes.LOGO_DETECTION, limit)] + annotations = self.detect(features) + return annotations[0].logos + + def detect_properties(self, limit=10): + """Detect the color properties of an image. + + :type limit: int + :param limit: The maximum number of image properties to find. + + :rtype: list + :returns: List of + :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`. + """ + features = [Feature(FeatureTypes.IMAGE_PROPERTIES, limit)] + annotations = self.detect(features) + return annotations[0].properties + + def detect_safe_search(self, limit=10): + """Retrieve safe search properties from an image. + + :type limit: int + :param limit: The number of safe search results to try and detect. + + :rtype: list + :returns: List of + :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation`. + """ + features = [Feature(FeatureTypes.SAFE_SEARCH_DETECTION, limit)] + annotations = self.detect(features) + return annotations[0].safe_searches + + def detect_text(self, limit=10): + """Detect text in an image. 
+ + :type limit: int + :param limit: The maximum instances of text to find. + + :rtype: list + :returns: List of + :class:`~google.cloud.vision.entity.EntityAnnotation`. + """ + features = [Feature(FeatureTypes.TEXT_DETECTION, limit)] + annotations = self.detect(features) + return annotations[0].texts + + def detect_web(self, limit=10): + """Detect similar images elsewhere on the web. + + :type limit: int + :param limit: The maximum instances of text to find. + + :rtype: list + :returns: List of + :class:`~google.cloud.vision.entity.EntityAnnotation`. + """ + features = [Feature(FeatureTypes.WEB_DETECTION, limit)] + annotations = self.detect(features) + return annotations[0].web diff --git a/vision/google/cloud/vision/likelihood.py b/vision/google/cloud/vision/likelihood.py new file mode 100644 index 000000000000..6fffc6640739 --- /dev/null +++ b/vision/google/cloud/vision/likelihood.py @@ -0,0 +1,47 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Likelihood constants returned from Vision API.""" + + +from enum import Enum + +from google.cloud.proto.vision.v1 import image_annotator_pb2 + + +def _get_pb_likelihood(likelihood): + """Convert protobuf Likelihood integer value to Likelihood enum. + + :type likelihood: int + :param likelihood: Protobuf integer representing ``Likelihood``. + + :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` + :returns: Enum ``Likelihood`` converted from protobuf value. 
+ """ + likelihood_pb = image_annotator_pb2.Likelihood.Name(likelihood) + return Likelihood[likelihood_pb] + + +class Likelihood(Enum): + """A representation of likelihood to give stable results across upgrades. + + See: + https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#likelihood + """ + UNKNOWN = 'UNKNOWN' + VERY_UNLIKELY = 'VERY_UNLIKELY' + UNLIKELY = 'UNLIKELY' + POSSIBLE = 'POSSIBLE' + LIKELY = 'LIKELY' + VERY_LIKELY = 'VERY_LIKELY' diff --git a/vision/google/cloud/vision/safe_search.py b/vision/google/cloud/vision/safe_search.py new file mode 100644 index 000000000000..d439d9ed6015 --- /dev/null +++ b/vision/google/cloud/vision/safe_search.py @@ -0,0 +1,116 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Safe search class for information returned from annotating an image.""" + +from google.cloud.vision.likelihood import _get_pb_likelihood +from google.cloud.vision.likelihood import Likelihood + + +class SafeSearchAnnotation(object): + """Representation of a SafeSearchAnnotation. + + :type adult_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood` + :param adult_likelihood: Likelihood that image contains adult material. + + :type spoof_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood` + :param spoof_likelihood: Likelihood that image is a spoof. 
+ + :type medical_likelihood: + :class:`~google.cloud.vision.likelihood.Likelihood` + :param medical_likelihood: Likelihood that image contains medical material. + + :type violence_likelihood: + :class:`~google.cloud.vision.likelihood.Likelihood` + :param violence_likelihood: Likelihood that image contains violence. + """ + + def __init__(self, adult_likelihood, spoof_likelihood, medical_likelihood, + violence_likelihood): + self._adult_likelihood = adult_likelihood + self._spoof_likelihood = spoof_likelihood + self._medical_likeliehood = medical_likelihood + self._violence_likelihood = violence_likelihood + + @classmethod + def from_api_repr(cls, response): + """Factory: construct SafeSearchAnnotation from Vision API response. + + :type response: dict + :param response: Dictionary response from Vision API with safe search + data. + + :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation` + :returns: Instance of ``SafeSearchAnnotation``. + """ + adult_likelihood = Likelihood[response['adult']] + spoof_likelihood = Likelihood[response['spoof']] + medical_likelihood = Likelihood[response['medical']] + violence_likelihood = Likelihood[response['violence']] + + return cls(adult_likelihood, spoof_likelihood, medical_likelihood, + violence_likelihood) + + @classmethod + def from_pb(cls, image): + """Factory: construct SafeSearchAnnotation from Vision API response. + + :type image: :class:`~google.cloud.proto.vision.v1.\ + image_annotator_pb2.SafeSearchAnnotation` + :param image: Protobuf response from Vision API with safe search data. + + :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation` + :returns: Instance of ``SafeSearchAnnotation``. + """ + values = [image.adult, image.spoof, image.medical, image.violence] + classifications = map(_get_pb_likelihood, values) + return cls(*classifications) + + @property + def adult(self): + """Represents the adult contents likelihood for the image. 
+ + :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` + :returns: ``Likelihood`` of the image containing adult content. + """ + return self._adult_likelihood + + @property + def spoof(self): + """The likelihood that an obvious modification was made to the image. + + :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` + :returns: The ``Likelihood`` that an obvious modification was made to + the image's canonical version to make it appear funny or + offensive. + """ + return self._spoof_likelihood + + @property + def medical(self): + """Likelihood this is a medical image. + + :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` + :returns: The ``Likelihood`` that the image is medical in origin. + """ + return self._medical_likeliehood + + @property + def violence(self): + """Likelihood that this image contains violence. + + :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` + :returns: The ``Likelihood`` that the image contains violence. + """ + return self._violence_likelihood diff --git a/vision/google/cloud/vision/text.py b/vision/google/cloud/vision/text.py new file mode 100644 index 000000000000..b903c3547b4b --- /dev/null +++ b/vision/google/cloud/vision/text.py @@ -0,0 +1,83 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Text annotations of an image.""" + +import json + +from google.cloud.proto.vision.v1 import text_annotation_pb2 +from google.protobuf import json_format + + +class TextAnnotation(object): + """Representation of a text annotation returned from the Vision API. + + :type pages: list + :param pages: List of + :class:`~google.cloud.proto.vision.v1.\ + text_annotation_pb2.Page`. + + :type text: str + :param text: String containing text detected from the image. + """ + def __init__(self, pages, text): + self._pages = pages + self._text = text + + @classmethod + def from_api_repr(cls, annotation): + """Factory: construct an instance of ``TextAnnotation`` from JSON. + + :type annotation: dict + :param annotation: Dictionary response from Vision API. + + :rtype: :class:`~google.cloud.vision.text.TextAnnotation` + :returns: Instance of ``TextAnnotation``. + """ + annotation_json = json.dumps(annotation) + text_annotation = text_annotation_pb2.TextAnnotation() + json_format.Parse(annotation_json, text_annotation) + return cls(text_annotation.pages, text_annotation.text) + + @classmethod + def from_pb(cls, annotation): + """Factory: construct an instance of ``TextAnnotation`` from protobuf. + + :type annotation: :class:`~google.cloud.proto.vision.v1.\ + text_annotation_pb2.TextAnnotation` + :param annotation: Populated instance of ``TextAnnotation``. + + :rtype: :class:`~google.cloud.vision.text.TextAnnotation` + :returns: Populated instance of ``TextAnnotation``. + """ + return cls(annotation.pages, annotation.text) + + @property + def pages(self): + """Pages found in text image. + + :rtype: list + :returns: List of :class:`~google.cloud.proto.vision.v1.\ + text_annotation_pb2.Page`. + """ + return self._pages + + @property + def text(self): + """Text detected from an image. + + :rtype: str + :returns: String of text found in an image. 
+ """ + return self._text diff --git a/vision/google/cloud/vision/web.py b/vision/google/cloud/vision/web.py new file mode 100644 index 000000000000..01e8cb2c6899 --- /dev/null +++ b/vision/google/cloud/vision/web.py @@ -0,0 +1,335 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Web image search.""" + + +class WebDetection(object): + """Representation of a web detection sent from the Vision API. + + :type web_entities: list + :param web_entities: List of + :class:`google.cloud.proto.vision.v1.\ + web_detection_pb2.WebDetection.WebEntity`. + + :type full_matching_images: list + :param full_matching_images: List of + :class:`google.cloud.proto.vision.v1.\ + web_detection_pb2.WebDetection.WebImage`. + + :type partial_matching_images: list + :param partial_matching_images: List of + :class:`google.cloud.proto.vision.v1.\ + web_detection_pb2.WebDetection.WebImage`. + + :type pages_with_matching_images: list + :param pages_with_matching_images: List of + :class:`google.cloud.proto.vision.v1.\ + web_detection_pb2.WebDetection.\ + WebPage`. 
+ """ + def __init__(self, web_entities=(), full_matching_images=(), + partial_matching_images=(), pages_with_matching_images=()): + self._web_entities = web_entities + self._full_matching_images = full_matching_images + self._partial_matching_images = partial_matching_images + self._pages_with_matching_images = pages_with_matching_images + + @classmethod + def from_api_repr(cls, detection): + """Factory: construct ``WebDetection`` from Vision API response. + + :type detection: dict + :param detection: Dictionary representing a ``WebDetection``. + + :rtype: :class:`~google.cloud.vision.web.WebDetection` + :returns: Populated instance of ``WebDetection``. + """ + web_entities = detection.get('webEntities') + full_matching_images = detection.get('fullMatchingImages') + partial_matching_images = detection.get('partialMatchingImages') + pages_with_matching_images = detection.get('pagesWithMatchingImages') + + web_detection = { + 'web_entities': [WebEntity.from_api_repr(web_entity) + for web_entity in web_entities], + 'full_matching_images': [WebImage.from_api_repr(web_image) + for web_image in full_matching_images], + 'partial_matching_images': [WebImage.from_api_repr(web_image) + for web_image + in partial_matching_images], + 'pages_with_matching_images': [WebPage.from_api_repr(web_page) + for web_page + in pages_with_matching_images], + } + return cls(**web_detection) + + @classmethod + def from_pb(cls, detection): + """Factory: construct ``WebDetection`` from Vision API response. + + :type detection: :class:`~google.cloud.proto.vision.v1.\ + web_detection_pb2.WebDetection` + :param detection: Dictionary representing a ``WebDetection``. + + :rtype: :class:`~google.cloud.vision.web.WebDetection` + :returns: Populated instance of ``WebDetection``. 
+ """ + web_entities = [WebEntity.from_pb(web_entity) + for web_entity in detection.web_entities] + full_image_matches = [WebImage.from_pb(web_image) + for web_image in detection.full_matching_images] + partial_image_matches = [WebImage.from_pb(web_image) + for web_image + in detection.partial_matching_images] + pages_with_images = [WebPage.from_pb(web_page) + for web_page + in detection.pages_with_matching_images] + return cls(web_entities, full_image_matches, partial_image_matches, + pages_with_images) + + @property + def web_entities(self): + """Return the web entities. + + :rtype: list + :returns: A list of ``WebEntity`` instances. + """ + return self._web_entities + + @property + def full_matching_images(self): + """Return the full matching images. + + :rtype: list + :returns: A list of ``WebImage`` instances. + """ + return self._full_matching_images + + @property + def partial_matching_images(self): + """Return the partially matching images. + + :rtype: list + :returns: A list of ``WebImage`` instances. + """ + return self._partial_matching_images + + @property + def pages_with_matching_images(self): + """Return the web pages with matching images. + + :rtype: list + :returns: A list of ``WebPage`` instances. + """ + return self._pages_with_matching_images + + +class WebEntity(object): + """Object containing a web entity sent from the Vision API. + + :type entity_id: str + :param entity_id: ID string for the entity. + + :type score: float + :param score: Overall relevancy score for the entity. + + :type description: str + :param description: Description of the entity. + """ + + def __init__(self, entity_id, score, description): + self._entity_id = entity_id + self._score = score + self._description = description + + @classmethod + def from_api_repr(cls, web_entity): + """Factory: construct ``WebImage`` from Vision API response. 
+ + :type web_entity: dict + :param web_entity: Dictionary representing a web entity + + :rtype: :class:`~google.cloud.vision.web.WebEntity` + :returns: Populated instance of ``WebEntity``. + """ + return cls(web_entity.get('entityId'), web_entity.get('score'), + web_entity.get('description')) + + @classmethod + def from_pb(cls, web_entity): + """Factory: construct ``WebEntity`` from Vision API response. + + :type web_entity: :class:`~google.cloud.proto.vision.v1.\ + web_detection_pb2.WebDetection.WebEntity` + :param web_entity: Dictionary representing a web entity + + :rtype: :class:`~google.cloud.vision.web.WebEntity` + :returns: Populated instance of ``WebEntity``. + """ + return cls(web_entity.entity_id, web_entity.score, + web_entity.description) + + @property + def entity_id(self): + """The entity ID. + + :rtype: str + :returns: String representing the entity ID. Opaque. + """ + return self._entity_id + + @property + def score(self): + """Overall relevancy score for the image. + + .. note:: + + Not normalized nor comparable between requests. + + :rtype: float + :returns: Relevancy score as a float. + """ + return self._score + + @property + def description(self): + """Canonical description of the entity, in English. + + :rtype: str + :returns: Description of the entity. + """ + return self._description + + +class WebImage(object): + """Object containing image information elsewhere on the web. + + :type url: str + :param url: URL of the matched image. + + :type score: float + :param score: Overall relevancy score of the image. + """ + def __init__(self, url, score): + self._url = url + self._score = score + + @classmethod + def from_api_repr(cls, web_image): + """Factory: construct ``WebImage`` from Vision API response. + + :type web_image: dict + :param web_image: Dictionary representing a web image + + :rtype: :class:`~google.cloud.vision.web.WebImage` + :returns: Populated instance of ``WebImage``. 
+ """ + return cls(web_image['url'], web_image['score']) + + @classmethod + def from_pb(cls, web_image): + """Factory: construct ``WebImage`` from Vision API response. + + :type web_image: :class:`~google.cloud.proto.vision.v1.\ + web_detection_pb2.WebDetection.WebImage` + :param web_image: Dictionary representing a web image + + :rtype: :class:`~google.cloud.vision.web.WebImage` + :returns: Populated instance of ``WebImage``. + """ + return cls(web_image.url, web_image.score) + + @property + def url(self): + """The URL of the matched image. + + :rtype: str + :returns: URL of matched image. + """ + return self._url + + @property + def score(self): + """Overall relevancy score for the image. + + .. note:: + + Not normalized nor comparable between requests. + + :rtype: float + :returns: Relevancy score as a float. + """ + return self._score + + +class WebPage(object): + """Web page that may contain this image or a similar one. + + :type url: str + :param url: URL of the matched image. + + :type score: float + :param score: Overall relevancy score of the image. + """ + def __init__(self, url, score): + self._url = url + self._score = score + + @classmethod + def from_api_repr(cls, web_page): + """Factory: construct ``WebPage`` from Vision API response. + + :type web_page: dict + :param web_page: Dictionary representing a web page + + :rtype: :class:`~google.cloud.vision.web.WebPage` + :returns: Populated instance of ``WebPage``. + """ + return cls(web_page['url'], web_page['score']) + + @classmethod + def from_pb(cls, web_page): + """Factory: construct ``WebPage`` from Vision API response. + + :type web_page: :class:`~google.cloud.proto.vision.v1.\ + web_detection_pb2.WebDetection.WebPage` + :param web_page: Dictionary representing a web image + + :rtype: :class:`~google.cloud.vision.web.WebPage` + :returns: Populated instance of ``WebPage``. + """ + return cls(web_page.url, web_page.score) + + @property + def url(self): + """The page URL. 
+ + :rtype: str + :returns: String representing a URL. + """ + return self._url + + @property + def score(self): + """Overall relevancy score for the image. + + .. note:: + + Not normalized nor comparable between requests. + + :rtype: float + :returns: Relevancy score as a float. + """ + return self._score diff --git a/vision/google/cloud/vision_v1.py b/vision/google/cloud/vision_v1/__init__.py similarity index 72% rename from vision/google/cloud/vision_v1.py rename to vision/google/cloud/vision_v1/__init__.py index 49c077175f2d..8dbabb470724 100644 --- a/vision/google/cloud/vision_v1.py +++ b/vision/google/cloud/vision_v1/__init__.py @@ -17,13 +17,9 @@ from google.cloud.gapic.vision.v1 import image_annotator_client as iac from google.cloud.gapic.vision.v1 import enums -from google.cloud.proto.vision.v1 import geometry_pb2 as geometry -from google.cloud.proto.vision.v1 import image_annotator_pb2 as image_annotator -from google.cloud.proto.vision.v1 import text_annotation_pb2 as text_annotation -from google.cloud.proto.vision.v1 import web_detection_pb2 as web_detection - from google.cloud.vision.decorators import add_single_feature_methods from google.cloud.vision.helpers import VisionHelpers +from google.cloud.vision_v1 import types @add_single_feature_methods @@ -33,6 +29,7 @@ class ImageAnnotatorClient(VisionHelpers, iac.ImageAnnotatorClient): __all__ = ( - 'enums', 'geometry', 'image_annotator', 'ImageAnnotatorClient', - 'text_annotation', 'web_detection', + 'enums', + 'ImageAnnotatorClient', + 'types', ) diff --git a/vision/google/cloud/vision_v1/types.py b/vision/google/cloud/vision_v1/types.py new file mode 100644 index 000000000000..ba6c41137d2c --- /dev/null +++ b/vision/google/cloud/vision_v1/types.py @@ -0,0 +1,34 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +import sys + +from google.cloud.proto.vision.v1 import geometry_pb2 +from google.cloud.proto.vision.v1 import image_annotator_pb2 +from google.cloud.proto.vision.v1 import text_annotation_pb2 +from google.cloud.proto.vision.v1 import web_detection_pb2 + +from google.gax.utils.messages import get_messages + + +names = [] +for module in (geometry_pb2, image_annotator_pb2, + text_annotation_pb2, web_detection_pb2): + for name, message in get_messages(module).items(): + setattr(sys.modules[__name__], name, message) + names.append(name) + + +__all__ = tuple(sorted(names)) diff --git a/vision/nox.py b/vision/nox.py index 751e1745b6f6..dfaeeb6e90e7 100644 --- a/vision/nox.py +++ b/vision/nox.py @@ -28,7 +28,7 @@ def unit_tests(session, python_version): session.interpreter = 'python{}'.format(python_version) # Install all test dependencies, then install this package in-place. - session.install('mock', 'pytest', 'pytest-cov') + session.install('mock', 'pytest', 'pytest-cov', '../core/') session.install('-e', '.') # Run py.test against the unit tests. @@ -42,7 +42,7 @@ def unit_tests(session, python_version): @nox.session @nox.parametrize('python_version', ['2.7', '3.6']) def system_tests(session, python_version): - """Run the unit test suite.""" + """Run the system test suite.""" # Run unit tests against all supported versions of Python. 
session.interpreter = 'python{}'.format(python_version) @@ -56,6 +56,23 @@ def system_tests(session, python_version): session.run('py.test', '--quiet', 'tests/system.py') +@nox.session +@nox.parametrize('python_version', ['2.7', '3.6']) +def system_tests_manual_layer(session, python_version): + """Run the system test suite for the old manual layer.""" + + # Run unit tests against all supported versions of Python. + session.interpreter = 'python{}'.format(python_version) + + # Install all test dependencies, then install this package in-place. + session.install('pytest', '../core/', '../storage/') + session.install('../test_utils/') + session.install('-e', '.') + + # Run py.test against the unit tests. + session.run('py.test', '--quiet', 'tests/system_old.py') + + @nox.session def lint(session): """Run flake8. diff --git a/vision/tests/system_old.py b/vision/tests/system_old.py new file mode 100644 index 000000000000..cddf399ddf5f --- /dev/null +++ b/vision/tests/system_old.py @@ -0,0 +1,744 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""System tests for Vision API.""" + +import os +import unittest + +import six + +from google.cloud import exceptions +from google.cloud import storage +from google.cloud import vision +from google.cloud.vision.entity import EntityAnnotation +from google.cloud.vision.feature import Feature +from google.cloud.vision.feature import FeatureTypes + +from test_utils.retry import RetryErrors +from test_utils.system import unique_resource_id + + +_SYS_TESTS_DIR = os.path.realpath(os.path.dirname(__file__)) +LOGO_FILE = os.path.join(_SYS_TESTS_DIR, 'data', 'logo.png') +FACE_FILE = os.path.join(_SYS_TESTS_DIR, 'data', 'faces.jpg') +LABEL_FILE = os.path.join(_SYS_TESTS_DIR, 'data', 'car.jpg') +LANDMARK_FILE = os.path.join(_SYS_TESTS_DIR, 'data', 'landmark.jpg') +TEXT_FILE = os.path.join(_SYS_TESTS_DIR, 'data', 'text.jpg') +FULL_TEXT_FILE = os.path.join(_SYS_TESTS_DIR, 'data', 'full-text.jpg') + + +class Config(object): + CLIENT = None + TEST_BUCKET = None + + +def setUpModule(): + Config.CLIENT = vision.Client() + storage_client = storage.Client() + bucket_name = 'new' + unique_resource_id() + Config.TEST_BUCKET = storage_client.bucket(bucket_name) + # 429 Too Many Requests in case API requests rate-limited. + retry_429 = RetryErrors(exceptions.TooManyRequests) + retry_429(Config.TEST_BUCKET.create)() + + +def tearDownModule(): + # 409 Conflict if the bucket is full. + # 429 Too Many Requests in case API requests rate-limited. 
+ bucket_retry = RetryErrors( + (exceptions.TooManyRequests, exceptions.Conflict)) + bucket_retry(Config.TEST_BUCKET.delete)(force=True) + + +class BaseVisionTestCase(unittest.TestCase): + def _assert_coordinate(self, coordinate): + if coordinate is None: + return + self.assertIsNotNone(coordinate) + self.assertIsInstance(coordinate, (int, float)) + + def _assert_likelihood(self, likelihood): + from google.cloud.vision.likelihood import Likelihood + + levels = [Likelihood.UNKNOWN, Likelihood.VERY_LIKELY, + Likelihood.UNLIKELY, Likelihood.POSSIBLE, Likelihood.LIKELY, + Likelihood.VERY_UNLIKELY] + self.assertIn(likelihood, levels) + + def _pb_not_implemented_skip(self, message): + if Config.CLIENT._use_grpc: + self.skipTest(message) + + +class TestVisionFullText(unittest.TestCase): + def setUp(self): + self.to_delete_by_case = [] + + def tearDown(self): + for value in self.to_delete_by_case: + value.delete() + + def _assert_full_text(self, full_text): + from google.cloud.vision.text import TextAnnotation + + self.assertIsInstance(full_text, TextAnnotation) + self.assertIsInstance(full_text.text, six.text_type) + self.assertEqual(len(full_text.pages), 1) + self.assertIsInstance(full_text.pages[0].width, int) + self.assertIsInstance(full_text.pages[0].height, int) + + def test_detect_full_text_content(self): + client = Config.CLIENT + with open(FULL_TEXT_FILE, 'rb') as image_file: + image = client.image(content=image_file.read()) + full_text = image.detect_full_text(language_hints=['en']) + self._assert_full_text(full_text) + + def test_detect_full_text_filename(self): + client = Config.CLIENT + image = client.image(filename=FULL_TEXT_FILE) + full_text = image.detect_full_text(language_hints=['en']) + self._assert_full_text(full_text) + + def test_detect_full_text_gcs(self): + bucket_name = Config.TEST_BUCKET.name + blob_name = 'full-text.jpg' + blob = Config.TEST_BUCKET.blob(blob_name) + self.to_delete_by_case.append(blob) # Clean-up. 
+ with open(FULL_TEXT_FILE, 'rb') as file_obj: + blob.upload_from_file(file_obj) + + source_uri = 'gs://%s/%s' % (bucket_name, blob_name) + + client = Config.CLIENT + image = client.image(source_uri=source_uri) + full_text = image.detect_full_text(language_hints=['en']) + self._assert_full_text(full_text) + + +class TestVisionClientCropHint(BaseVisionTestCase): + def setUp(self): + self.to_delete_by_case = [] + + def tearDown(self): + for value in self.to_delete_by_case: + value.delete() + + def _assert_crop_hint(self, hint): + from google.cloud.vision.crop_hint import CropHint + from google.cloud.vision.geometry import Bounds + + self.assertIsInstance(hint, CropHint) + self.assertIsInstance(hint.bounds, Bounds) + self.assertGreater(len(hint.bounds.vertices), 1) + self.assertIsInstance(hint.confidence, (int, float)) + self.assertIsInstance(hint.importance_fraction, float) + + def test_detect_crop_hints_content(self): + client = Config.CLIENT + with open(FACE_FILE, 'rb') as image_file: + image = client.image(content=image_file.read()) + crop_hints = image.detect_crop_hints( + aspect_ratios=[1.3333, 1.7777], limit=2) + self.assertEqual(len(crop_hints), 2) + for hint in crop_hints: + self._assert_crop_hint(hint) + + def test_detect_crop_hints_filename(self): + client = Config.CLIENT + image = client.image(filename=FACE_FILE) + crop_hints = image.detect_crop_hints( + aspect_ratios=[1.3333, 1.7777], limit=2) + self.assertEqual(len(crop_hints), 2) + for hint in crop_hints: + self._assert_crop_hint(hint) + + def test_detect_crop_hints_gcs(self): + bucket_name = Config.TEST_BUCKET.name + blob_name = 'faces.jpg' + blob = Config.TEST_BUCKET.blob(blob_name) + self.to_delete_by_case.append(blob) # Clean-up. 
+ with open(FACE_FILE, 'rb') as file_obj: + blob.upload_from_file(file_obj) + + source_uri = 'gs://%s/%s' % (bucket_name, blob_name) + client = Config.CLIENT + image = client.image(source_uri=source_uri) + crop_hints = image.detect_crop_hints( + aspect_ratios=[1.3333, 1.7777], limit=2) + self.assertEqual(len(crop_hints), 2) + for hint in crop_hints: + self._assert_crop_hint(hint) + + +class TestVisionClientLogo(unittest.TestCase): + def setUp(self): + self.to_delete_by_case = [] + + def tearDown(self): + for value in self.to_delete_by_case: + value.delete() + + def _assert_logo(self, logo): + self.assertIsInstance(logo, EntityAnnotation) + self.assertEqual(logo.description, 'Google') + self.assertEqual(len(logo.bounds.vertices), 4) + self.assertEqual(logo.bounds.vertices[0].x_coordinate, 40) + self.assertEqual(logo.bounds.vertices[0].y_coordinate, 40) + self.assertEqual(logo.bounds.vertices[1].x_coordinate, 959) + self.assertEqual(logo.bounds.vertices[1].y_coordinate, 40) + self.assertEqual(logo.bounds.vertices[2].x_coordinate, 959) + self.assertEqual(logo.bounds.vertices[2].y_coordinate, 302) + self.assertEqual(logo.bounds.vertices[3].x_coordinate, 40) + self.assertEqual(logo.bounds.vertices[3].y_coordinate, 302) + self.assertTrue(logo.score > 0.25) + + def test_detect_logos_content(self): + client = Config.CLIENT + with open(LOGO_FILE, 'rb') as image_file: + image = client.image(content=image_file.read()) + logos = image.detect_logos() + self.assertEqual(len(logos), 1) + logo = logos[0] + self._assert_logo(logo) + + def test_detect_logos_filename(self): + client = Config.CLIENT + image = client.image(filename=LOGO_FILE) + logos = image.detect_logos() + self.assertEqual(len(logos), 1) + logo = logos[0] + self._assert_logo(logo) + + def test_detect_logos_gcs(self): + bucket_name = Config.TEST_BUCKET.name + blob_name = 'logo.png' + blob = Config.TEST_BUCKET.blob(blob_name) + self.to_delete_by_case.append(blob) # Clean-up. 
+ with open(LOGO_FILE, 'rb') as file_obj: + blob.upload_from_file(file_obj) + + source_uri = 'gs://%s/%s' % (bucket_name, blob_name) + + client = Config.CLIENT + image = client.image(source_uri=source_uri) + logos = image.detect_logos() + self.assertEqual(len(logos), 1) + logo = logos[0] + self._assert_logo(logo) + + +class TestVisionClientFace(BaseVisionTestCase): + def setUp(self): + self.to_delete_by_case = [] + + def tearDown(self): + for value in self.to_delete_by_case: + value.delete() + + def _assert_landmarks(self, landmarks): + from google.cloud.vision.face import Landmark + from google.cloud.vision.face import LandmarkTypes + from google.cloud.vision.face import Position + + for landmark in LandmarkTypes: + if landmark is not LandmarkTypes.UNKNOWN_LANDMARK: + feature = getattr(landmarks, landmark.name.lower()) + self.assertIsInstance(feature, Landmark) + self.assertIsInstance(feature.position, Position) + self._assert_coordinate(feature.position.x_coordinate) + self._assert_coordinate(feature.position.y_coordinate) + self._assert_coordinate(feature.position.z_coordinate) + + def _assert_face(self, face): + from google.cloud.vision.face import Bounds + from google.cloud.vision.face import FDBounds + from google.cloud.vision.face import Face + from google.cloud.vision.face import Landmarks + from google.cloud.vision.geometry import Vertex + + self.assertIsInstance(face, Face) + self.assertGreater(face.detection_confidence, 0.0) + self._assert_likelihood(face.anger) + self._assert_likelihood(face.joy) + self._assert_likelihood(face.sorrow) + self._assert_likelihood(face.surprise) + self._assert_likelihood(face.image_properties.blurred) + self._assert_likelihood(face.image_properties.underexposed) + self._assert_likelihood(face.headwear) + self.assertNotEqual(face.angles.roll, 0.0) + self.assertNotEqual(face.angles.pan, 0.0) + self.assertNotEqual(face.angles.tilt, 0.0) + + self.assertIsInstance(face.bounds, Bounds) + for vertex in face.bounds.vertices: + 
self.assertIsInstance(vertex, Vertex) + self._assert_coordinate(vertex.x_coordinate) + self._assert_coordinate(vertex.y_coordinate) + + self.assertIsInstance(face.fd_bounds, FDBounds) + for vertex in face.fd_bounds.vertices: + self.assertIsInstance(vertex, Vertex) + self._assert_coordinate(vertex.x_coordinate) + self._assert_coordinate(vertex.y_coordinate) + + self.assertIsInstance(face.landmarks, Landmarks) + self._assert_landmarks(face.landmarks) + + def test_detect_faces_content(self): + client = Config.CLIENT + with open(FACE_FILE, 'rb') as image_file: + image = client.image(content=image_file.read()) + faces = image.detect_faces() + self.assertEqual(len(faces), 5) + for face in faces: + self._assert_face(face) + + def test_detect_faces_gcs(self): + bucket_name = Config.TEST_BUCKET.name + blob_name = 'faces.jpg' + blob = Config.TEST_BUCKET.blob(blob_name) + self.to_delete_by_case.append(blob) # Clean-up. + with open(FACE_FILE, 'rb') as file_obj: + blob.upload_from_file(file_obj) + + source_uri = 'gs://%s/%s' % (bucket_name, blob_name) + client = Config.CLIENT + image = client.image(source_uri=source_uri) + faces = image.detect_faces() + self.assertEqual(len(faces), 5) + for face in faces: + self._assert_face(face) + + def test_detect_faces_filename(self): + client = Config.CLIENT + image = client.image(filename=FACE_FILE) + faces = image.detect_faces() + self.assertEqual(len(faces), 5) + for face in faces: + self._assert_face(face) + + +class TestVisionClientLabel(BaseVisionTestCase): + DESCRIPTIONS = ( + 'car', + 'vehicle', + 'land vehicle', + 'automotive design', + 'wheel', + 'automobile make', + 'luxury vehicle', + 'sports car', + 'performance car', + 'automotive exterior', + ) + + def setUp(self): + self.to_delete_by_case = [] + + def tearDown(self): + for value in self.to_delete_by_case: + value.delete() + + def _assert_label(self, label): + self.assertIsInstance(label, EntityAnnotation) + self.assertIn(label.description, self.DESCRIPTIONS) + 
self.assertIsInstance(label.mid, six.text_type) + self.assertGreater(label.score, 0.0) + + def test_detect_labels_content(self): + client = Config.CLIENT + with open(LABEL_FILE, 'rb') as image_file: + image = client.image(content=image_file.read()) + labels = image.detect_labels() + self.assertEqual(len(labels), 10) + for label in labels: + self._assert_label(label) + + def test_detect_labels_gcs(self): + bucket_name = Config.TEST_BUCKET.name + blob_name = 'car.jpg' + blob = Config.TEST_BUCKET.blob(blob_name) + self.to_delete_by_case.append(blob) # Clean-up. + with open(LABEL_FILE, 'rb') as file_obj: + blob.upload_from_file(file_obj) + + source_uri = 'gs://%s/%s' % (bucket_name, blob_name) + + client = Config.CLIENT + image = client.image(source_uri=source_uri) + labels = image.detect_labels() + self.assertEqual(len(labels), 10) + for label in labels: + self._assert_label(label) + + def test_detect_labels_filename(self): + client = Config.CLIENT + image = client.image(filename=LABEL_FILE) + labels = image.detect_labels() + self.assertEqual(len(labels), 10) + for label in labels: + self._assert_label(label) + + +class TestVisionClientLandmark(BaseVisionTestCase): + DESCRIPTIONS = ('Mount Rushmore',) + + def setUp(self): + self.to_delete_by_case = [] + + def tearDown(self): + for value in self.to_delete_by_case: + value.delete() + + def _assert_landmark(self, landmark): + self.assertIsInstance(landmark, EntityAnnotation) + self.assertIn(landmark.description, self.DESCRIPTIONS) + self.assertEqual(len(landmark.locations), 1) + location = landmark.locations[0] + self._assert_coordinate(location.latitude) + self._assert_coordinate(location.longitude) + for vertex in landmark.bounds.vertices: + self._assert_coordinate(vertex.x_coordinate) + self._assert_coordinate(vertex.y_coordinate) + self.assertGreater(landmark.score, 0.2) + self.assertIsInstance(landmark.mid, six.text_type) + + def test_detect_landmark_content(self): + client = Config.CLIENT + with open(LANDMARK_FILE, 
'rb') as image_file: + image = client.image(content=image_file.read()) + landmarks = image.detect_landmarks() + self.assertEqual(len(landmarks), 1) + landmark = landmarks[0] + self._assert_landmark(landmark) + + def test_detect_landmark_gcs(self): + bucket_name = Config.TEST_BUCKET.name + blob_name = 'landmark.jpg' + blob = Config.TEST_BUCKET.blob(blob_name) + self.to_delete_by_case.append(blob) # Clean-up. + with open(LANDMARK_FILE, 'rb') as file_obj: + blob.upload_from_file(file_obj) + + source_uri = 'gs://%s/%s' % (bucket_name, blob_name) + + client = Config.CLIENT + image = client.image(source_uri=source_uri) + landmarks = image.detect_landmarks() + self.assertEqual(len(landmarks), 1) + landmark = landmarks[0] + self._assert_landmark(landmark) + + def test_detect_landmark_filename(self): + client = Config.CLIENT + image = client.image(filename=LANDMARK_FILE) + landmarks = image.detect_landmarks() + self.assertEqual(len(landmarks), 1) + landmark = landmarks[0] + self._assert_landmark(landmark) + + +class TestVisionClientSafeSearch(BaseVisionTestCase): + def setUp(self): + self.to_delete_by_case = [] + + def tearDown(self): + for value in self.to_delete_by_case: + value.delete() + + def _assert_safe_search(self, safe_search): + from google.cloud.vision.safe_search import SafeSearchAnnotation + + self.assertIsInstance(safe_search, SafeSearchAnnotation) + self._assert_likelihood(safe_search.adult) + self._assert_likelihood(safe_search.spoof) + self._assert_likelihood(safe_search.medical) + self._assert_likelihood(safe_search.violence) + + def test_detect_safe_search_content(self): + client = Config.CLIENT + with open(FACE_FILE, 'rb') as image_file: + image = client.image(content=image_file.read()) + safe_search = image.detect_safe_search() + self._assert_safe_search(safe_search) + + def test_detect_safe_search_gcs(self): + bucket_name = Config.TEST_BUCKET.name + blob_name = 'faces.jpg' + blob = Config.TEST_BUCKET.blob(blob_name) + 
self.to_delete_by_case.append(blob) # Clean-up. + with open(FACE_FILE, 'rb') as file_obj: + blob.upload_from_file(file_obj) + + source_uri = 'gs://%s/%s' % (bucket_name, blob_name) + + client = Config.CLIENT + image = client.image(source_uri=source_uri) + safe_search = image.detect_safe_search() + self._assert_safe_search(safe_search) + + def test_detect_safe_search_filename(self): + client = Config.CLIENT + image = client.image(filename=FACE_FILE) + safe_search = image.detect_safe_search() + self._assert_safe_search(safe_search) + + +class TestVisionClientText(unittest.TestCase): + DESCRIPTIONS = ( + 'Do', + 'what', + 'is', + 'right,', + 'not', + 'what', + 'is', + 'easy', + 'Do what is\nright, not\nwhat is easy\n', + ) + + def setUp(self): + self.to_delete_by_case = [] + + def tearDown(self): + for value in self.to_delete_by_case: + value.delete() + + def _assert_text(self, text): + self.assertIsInstance(text, EntityAnnotation) + self.assertIn(text.description, self.DESCRIPTIONS) + self.assertIn(text.locale, (None, '', 'en')) + self.assertIsInstance(text.score, (type(None), float)) + + def test_detect_text_content(self): + client = Config.CLIENT + with open(TEXT_FILE, 'rb') as image_file: + image = client.image(content=image_file.read()) + texts = image.detect_text() + self.assertEqual(len(texts), 9) + for text in texts: + self._assert_text(text) + + def test_detect_text_gcs(self): + bucket_name = Config.TEST_BUCKET.name + blob_name = 'text.jpg' + blob = Config.TEST_BUCKET.blob(blob_name) + self.to_delete_by_case.append(blob) # Clean-up. 
+ with open(TEXT_FILE, 'rb') as file_obj: + blob.upload_from_file(file_obj) + + source_uri = 'gs://%s/%s' % (bucket_name, blob_name) + + client = Config.CLIENT + image = client.image(source_uri=source_uri) + texts = image.detect_text() + self.assertEqual(len(texts), 9) + for text in texts: + self._assert_text(text) + + def test_detect_text_filename(self): + client = Config.CLIENT + image = client.image(filename=TEXT_FILE) + texts = image.detect_text() + self.assertEqual(len(texts), 9) + for text in texts: + self._assert_text(text) + + +class TestVisionClientImageProperties(BaseVisionTestCase): + def setUp(self): + self.to_delete_by_case = [] + + def tearDown(self): + for value in self.to_delete_by_case: + value.delete() + + def _assert_color(self, color): + self.assertIsInstance(color.red, float) + self.assertIsInstance(color.green, float) + self.assertIsInstance(color.blue, float) + self.assertIsInstance(color.alpha, float) + self.assertNotEqual(color.red, 0.0) + self.assertNotEqual(color.green, 0.0) + self.assertNotEqual(color.blue, 0.0) + + def _assert_properties(self, image_property): + from google.cloud.vision.color import ImagePropertiesAnnotation + + self.assertIsInstance(image_property, ImagePropertiesAnnotation) + results = image_property.colors + for color_info in results: + self._assert_color(color_info.color) + self.assertNotEqual(color_info.pixel_fraction, 0.0) + self.assertNotEqual(color_info.score, 0.0) + + def test_detect_properties_content(self): + client = Config.CLIENT + with open(FACE_FILE, 'rb') as image_file: + image = client.image(content=image_file.read()) + properties = image.detect_properties() + self._assert_properties(properties) + + def test_detect_properties_gcs(self): + client = Config.CLIENT + bucket_name = Config.TEST_BUCKET.name + blob_name = 'faces.jpg' + blob = Config.TEST_BUCKET.blob(blob_name) + self.to_delete_by_case.append(blob) # Clean-up. 
+ with open(FACE_FILE, 'rb') as file_obj: + blob.upload_from_file(file_obj) + + source_uri = 'gs://%s/%s' % (bucket_name, blob_name) + + image = client.image(source_uri=source_uri) + properties = image.detect_properties() + self._assert_properties(properties) + + def test_detect_properties_filename(self): + client = Config.CLIENT + image = client.image(filename=FACE_FILE) + properties = image.detect_properties() + self._assert_properties(properties) + + +class TestVisionBatchProcessing(BaseVisionTestCase): + def setUp(self): + self.to_delete_by_case = [] + + def tearDown(self): + for value in self.to_delete_by_case: + value.delete() + + def test_batch_detect_gcs(self): + client = Config.CLIENT + bucket_name = Config.TEST_BUCKET.name + + # Logo GCS image. + blob_name = 'logos.jpg' + blob = Config.TEST_BUCKET.blob(blob_name) + self.to_delete_by_case.append(blob) # Clean-up. + with open(LOGO_FILE, 'rb') as file_obj: + blob.upload_from_file(file_obj) + + logo_source_uri = 'gs://%s/%s' % (bucket_name, blob_name) + + image_one = client.image(source_uri=logo_source_uri) + logo_feature = Feature(FeatureTypes.LOGO_DETECTION, 2) + + # Faces GCS image. + blob_name = 'faces.jpg' + blob = Config.TEST_BUCKET.blob(blob_name) + self.to_delete_by_case.append(blob) # Clean-up. 
+ with open(FACE_FILE, 'rb') as file_obj: + blob.upload_from_file(file_obj) + + face_source_uri = 'gs://%s/%s' % (bucket_name, blob_name) + + image_two = client.image(source_uri=face_source_uri) + face_feature = Feature(FeatureTypes.FACE_DETECTION, 2) + + batch = client.batch() + batch.add_image(image_one, [logo_feature]) + batch.add_image(image_two, [face_feature, logo_feature]) + results = batch.detect() + self.assertEqual(len(results), 2) + self.assertIsInstance(results[0], vision.annotations.Annotations) + self.assertIsInstance(results[1], vision.annotations.Annotations) + self.assertEqual(len(results[0].logos), 1) + self.assertEqual(len(results[0].faces), 0) + + self.assertEqual(len(results[1].logos), 0) + self.assertEqual(len(results[1].faces), 2) + + +class TestVisionWebAnnotation(BaseVisionTestCase): + def setUp(self): + self.to_delete_by_case = [] + + def tearDown(self): + for value in self.to_delete_by_case: + value.delete() + + def _assert_web_entity(self, web_entity): + from google.cloud.vision.web import WebEntity + + self.assertIsInstance(web_entity, WebEntity) + self.assertIsInstance(web_entity.entity_id, six.text_type) + self.assertIsInstance(web_entity.score, float) + self.assertIsInstance(web_entity.description, six.text_type) + + def _assert_web_image(self, web_image): + from google.cloud.vision.web import WebImage + + self.assertIsInstance(web_image, WebImage) + self.assertIsInstance(web_image.url, six.text_type) + self.assertIsInstance(web_image.score, float) + + def _assert_web_page(self, web_page): + from google.cloud.vision.web import WebPage + + self.assertIsInstance(web_page, WebPage) + self.assertIsInstance(web_page.url, six.text_type) + self.assertIsInstance(web_page.score, float) + + def _assert_web_images(self, web_images, limit): + self.assertEqual(len(web_images.web_entities), limit) + for web_entity in web_images.web_entities: + self._assert_web_entity(web_entity) + + self.assertEqual(len(web_images.full_matching_images), limit) + 
for web_image in web_images.full_matching_images: + self._assert_web_image(web_image) + + self.assertEqual(len(web_images.partial_matching_images), limit) + for web_image in web_images.partial_matching_images: + self._assert_web_image(web_image) + + self.assertEqual(len(web_images.pages_with_matching_images), limit) + for web_page in web_images.pages_with_matching_images: + self._assert_web_page(web_page) + + @RetryErrors(unittest.TestCase.failureException) + def test_detect_web_images_from_content(self): + client = Config.CLIENT + with open(LANDMARK_FILE, 'rb') as image_file: + image = client.image(content=image_file.read()) + limit = 3 + web_images = image.detect_web(limit=limit) + self._assert_web_images(web_images, limit) + + def test_detect_web_images_from_gcs(self): + client = Config.CLIENT + bucket_name = Config.TEST_BUCKET.name + blob_name = 'landmark.jpg' + blob = Config.TEST_BUCKET.blob(blob_name) + self.to_delete_by_case.append(blob) # Clean-up. + with open(LANDMARK_FILE, 'rb') as file_obj: + blob.upload_from_file(file_obj) + + source_uri = 'gs://%s/%s' % (bucket_name, blob_name) + + image = client.image(source_uri=source_uri) + limit = 5 + web_images = image.detect_web(limit=limit) + self._assert_web_images(web_images, limit) + + def test_detect_web_images_from_filename(self): + client = Config.CLIENT + image = client.image(filename=LANDMARK_FILE) + limit = 5 + web_images = image.detect_web(limit=limit) + self._assert_web_images(web_images, limit) diff --git a/vision/tests/unit/__init__.py b/vision/tests/unit/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/vision/tests/unit/_fixtures.py b/vision/tests/unit/_fixtures.py new file mode 100644 index 000000000000..6075fb2eea31 --- /dev/null +++ b/vision/tests/unit/_fixtures.py @@ -0,0 +1,2001 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +FULL_TEXT_RESPONSE = { + 'responses': [{ + 'fullTextAnnotation': { + 'pages': [{ + 'height': 1872, + 'property': { + 'detectedLanguages': [{ + 'languageCode': 'en' + }] + }, + 'blocks': [{ + 'blockType': 'TEXT', + 'property': { + 'detectedLanguages': [{ + 'languageCode': 'en' + }] + }, + 'boundingBox': { + 'vertices': [{ + 'y': 8, + 'x': 344 + }, { + 'y': 8, + 'x': 434 + }, { + 'y': 22, + 'x': 434 + }, { + 'y': 22, + 'x': 344 + }] + }, + 'paragraphs': [{ + 'property': { + 'detectedLanguages': [{ + 'languageCode': 'en' + }] + }, + 'words': [{ + 'symbols': [{ + 'property': { + 'detectedLanguages': [{ + 'languageCode': 'en' + }] + }, + 'text': 'T', + 'boundingBox': { + 'vertices': [{ + 'y': 8, + 'x': 344 + }, { + 'y': 8, + 'x': 352 + }, { + 'y': 22, + 'x': 352 + }, { + 'y': 22, + 'x': 344 + }] + } + }], + 'property': { + 'detectedLanguages': [{ + 'languageCode': 'en' + }] + }, + 'boundingBox': { + 'vertices': [{ + 'y': 8, + 'x': 377 + }, { + 'y': 8, + 'x': 434 + }, { + 'y': 22, + 'x': 434 + }, { + 'y': 22, + 'x': 377 + }] + } + }], + 'boundingBox': { + 'vertices': [{ + 'y': 8, + 'x': 344 + }, { + 'y': 8, + 'x': 434 + }, { + 'y': 22, + 'x': 434 + }, { + 'y': 22, + 'x': 344 + }] + } + }] + }], + 'width': 792 + }], + 'text': 'The Republic\nBy Plato' + } + }] +} + +CROP_HINTS_RESPONSE = { + "responses": [{ + "cropHintsAnnotation": { + "cropHints": [{ + "importanceFraction": 1.22, + "boundingPoly": { + "vertices": [{ + "x": 77 + }, { + "x": 1821 + }, { + "x": 1821, + "y": 1306 + }, { + "x": 77, + "y": 1306 + }] + }, + "confidence": 0.5 + }, { + 
"importanceFraction": 1.2099999, + "boundingPoly": { + "vertices": [{}, { + "x": 1959 + }, { + "x": 1959, + "y": 1096 + }, { + "y": 1096 + }] + }, + "confidence": 0.29999998 + }] + } + }] +} + + +IMAGE_PROPERTIES_RESPONSE = { + 'responses': [ + { + 'imagePropertiesAnnotation': { + 'dominantColors': { + 'colors': [ + { + 'color': { + 'red': 253, + 'green': 203, + 'blue': 65, + 'alpha': 0.0 + }, + 'score': 0.42258179, + 'pixelFraction': 0.025376344 + }, + { + 'color': { + 'red': 216, + 'green': 69, + 'blue': 56 + }, + 'score': 0.34945792, + 'pixelFraction': 0.026093191 + }, + { + 'color': { + 'red': 79, + 'green': 142, + 'blue': 245 + }, + 'score': 0.050921876, + 'pixelFraction': 0.014193549 + }, + { + 'color': { + 'red': 249, + 'green': 246, + 'blue': 246 + }, + 'score': 0.0059412993, + 'pixelFraction': 0.86896056 + }, + { + 'color': { + 'red': 222, + 'green': 119, + 'blue': 51 + }, + 'score': 0.0043299114, + 'pixelFraction': 0.00021505376 + }, + { + 'color': { + 'red': 226, + 'green': 138, + 'blue': 130 + }, + 'score': 0.0038594988, + 'pixelFraction': 0.00086021505 + }, + { + 'color': { + 'red': 165, + 'green': 194, + 'blue': 243 + }, + 'score': 0.0029492097, + 'pixelFraction': 0.0015053763 + }, + { + 'color': { + 'red': 231, + 'green': 169, + 'blue': 164 + }, + 'score': 0.0017002203, + 'pixelFraction': 0.00043010752 + }, + { + 'color': { + 'red': 137, + 'green': 98, + 'blue': 142 + }, + 'score': 0.0013974205, + 'pixelFraction': 0.00071684585 + }, + { + 'color': { + 'red': 239, + 'green': 179, + 'blue': 56 + }, + 'score': 0.050473157, + 'pixelFraction': 0.0022222223 + } + ] + } + } + } + ] +} + +LABEL_DETECTION_RESPONSE = { + 'responses': [ + { + 'labelAnnotations': [ + { + 'mid': '/m/0k4j', + 'description': 'automobile', + 'score': 0.9776855 + }, + { + 'mid': '/m/07yv9', + 'description': 'vehicle', + 'score': 0.947987 + }, + { + 'mid': '/m/07r04', + 'description': 'truck', + 'score': 0.88429511 + } + ] + } + ] +} + + +LANDMARK_DETECTION_RESPONSE = { + 'responses': 
[ + { + 'landmarkAnnotations': [ + { + 'mid': '/m/04gdr', + 'description': 'Louvre', + 'score': 0.67257267, + 'boundingPoly': { + 'vertices': [ + { + 'x': 1075, + 'y': 49 + }, + { + 'x': 1494, + 'y': 49 + }, + { + 'x': 1494, + 'y': 307 + }, + { + 'x': 1075, + 'y': 307 + } + ] + }, + 'locations': [ + { + 'latLng': { + 'latitude': 48.861013, + 'longitude': 2.335818 + } + } + ] + }, + { + 'mid': '/m/094llg', + 'description': 'Louvre Pyramid', + 'score': 0.53734678, + 'boundingPoly': { + 'vertices': [ + { + 'x': 227, + 'y': 274 + }, + { + 'x': 1471, + 'y': 274 + }, + { + 'x': 1471, + 'y': 624 + }, + { + 'x': 227, + 'y': 624 + } + ] + }, + 'locations': [ + { + 'latLng': { + 'latitude': 48.860749, + 'longitude': 2.336312 + } + } + ] + } + ] + } + ] +} + +LOGO_DETECTION_RESPONSE = { + 'responses': [ + { + 'logoAnnotations': [ + { + 'mid': '/m/05b5c', + 'description': 'Brand1', + 'score': 0.63192177, + 'boundingPoly': { + 'vertices': [ + { + 'x': 78, + 'y': 162 + }, + { + 'x': 282, + 'y': 162 + }, + { + 'x': 282, + 'y': 211 + }, + { + 'x': 78, + 'y': 211 + } + ] + } + }, + { + 'mid': '/m/0fpzzp', + 'description': 'Brand2', + 'score': 0.5492993, + 'boundingPoly': { + 'vertices': [ + { + 'x': 310, + 'y': 209 + }, + { + 'x': 477, + 'y': 209 + }, + { + 'x': 477, + 'y': 282 + }, + { + 'x': 310, + 'y': 282 + } + ] + } + } + ] + } + ] +} + +FACE_DETECTION_RESPONSE = { + 'responses': [{ + 'faceAnnotations': [{ + 'headwearLikelihood': 'VERY_UNLIKELY', + 'panAngle': 6.027647, + 'underExposedLikelihood': 'VERY_UNLIKELY', + 'landmarkingConfidence': 0.54453093, + 'detectionConfidence': 0.9863683, + 'joyLikelihood': 'VERY_LIKELY', + 'landmarks': [{ + 'position': { + 'y': 482.69385, + 'x': 1004.8003, + 'z': 0.0016593217 + }, + 'type': 'LEFT_EYE' + }, { + 'position': { + 'y': 470.90149, + 'x': 1218.9751, + 'z': 20.597967 + }, + 'type': 'RIGHT_EYE' + }, { + 'position': { + 'y': 441.46521, + 'x': 934.25629, + 'z': -1.1400928 + }, + 'type': 'LEFT_OF_LEFT_EYEBROW' + }, { + 'position': { + 
'y': 449.2872, + 'x': 1059.306, + 'z': -47.195843 + }, + 'type': 'RIGHT_OF_LEFT_EYEBROW' + }, { + 'position': { + 'y': 446.05408, + 'x': 1163.678, + 'z': -37.211197 + }, + 'type': 'LEFT_OF_RIGHT_EYEBROW' + }, { + 'position': { + 'y': 424.18341, + 'x': 1285.0209, + 'z': 34.844131 + }, + 'type': 'RIGHT_OF_RIGHT_EYEBROW' + }, { + 'position': { + 'y': 485.18387, + 'x': 1113.4325, + 'z': -32.579361 + }, + 'type': 'MIDPOINT_BETWEEN_EYES' + }, { + 'position': { + 'y': 620.27032, + 'x': 1122.5671, + 'z': -51.019524 + }, + 'type': 'NOSE_TIP' + }, { + 'position': { + 'y': 674.32526, + 'x': 1117.0417, + 'z': 17.330631 + }, + 'type': 'UPPER_LIP' + }, { + 'position': { + 'y': 737.29736, + 'x': 1115.7112, + 'z': 54.076469 + }, + 'type': 'LOWER_LIP' + }, { + 'position': { + 'y': 680.62927, + 'x': 1017.0475, + 'z': 72.948006 + }, + 'type': 'MOUTH_LEFT' + }, { + 'position': { + 'y': 681.53552, + 'x': 1191.5186, + 'z': 87.198334 + }, + 'type': 'MOUTH_RIGHT' + }, { + 'position': { + 'y': 702.3808, + 'x': 1115.4193, + 'z': 42.56889 + }, + 'type': 'MOUTH_CENTER' + }, { + 'position': { + 'y': 606.68555, + 'x': 1169.0006, + 'z': 33.98217 + }, + 'type': 'NOSE_BOTTOM_RIGHT' + }, { + 'position': { + 'y': 612.71509, + 'x': 1053.9476, + 'z': 23.409685 + }, + 'type': 'NOSE_BOTTOM_LEFT' + }, { + 'position': { + 'y': 634.95532, + 'x': 1116.6818, + 'z': 3.386874 + }, + 'type': 'NOSE_BOTTOM_CENTER' + }, { + 'position': { + 'y': 476.70197, + 'x': 1009.2689, + 'z': -16.84004 + }, + 'type': 'LEFT_EYE_TOP_BOUNDARY' + }, { + 'position': { + 'y': 491.64874, + 'x': 1049.3926, + 'z': 7.0493474 + }, + 'type': 'LEFT_EYE_RIGHT_CORNER' + }, { + 'position': { + 'y': 499.426, + 'x': 1003.9925, + 'z': 3.5417991 + }, + 'type': 'LEFT_EYE_BOTTOM_BOUNDARY' + }, { + 'position': { + 'y': 482.37302, + 'x': 964.48242, + 'z': 14.96223 + }, + 'type': 'LEFT_EYE_LEFT_CORNER' + }, { + 'position': { + 'y': 487.90195, + 'x': 1005.3607, + 'z': -4.7375555 + }, + 'type': 'LEFT_EYE_PUPIL' + }, { + 'position': { + 'y': 468.33276, + 
'x': 1212.7329, + 'z': 3.5585577 + }, + 'type': 'RIGHT_EYE_TOP_BOUNDARY' + }, { + 'position': { + 'y': 470.92487, + 'x': 1251.7043, + 'z': 43.794273 + }, + 'type': 'RIGHT_EYE_RIGHT_CORNER' + }, { + 'position': { + 'y': 486.98676, + 'x': 1217.4629, + 'z': 23.580008 + }, + 'type': 'RIGHT_EYE_BOTTOM_BOUNDARY' + }, { + 'position': { + 'y': 482.41071, + 'x': 1173.4624, + 'z': 18.852427 + }, + 'type': 'RIGHT_EYE_LEFT_CORNER' + }, { + 'position': { + 'y': 479.32739, + 'x': 1213.9757, + 'z': 16.041821 + }, + 'type': 'RIGHT_EYE_PUPIL' + }, { + 'position': { + 'y': 424.38797, + 'x': 1001.2206, + 'z': -46.463905 + }, + 'type': 'LEFT_EYEBROW_UPPER_MIDPOINT' + }, { + 'position': { + 'y': 415.33655, + 'x': 1221.9457, + 'z': -24.29454 + }, + 'type': 'RIGHT_EYEBROW_UPPER_MIDPOINT' + }, { + 'position': { + 'y': 506.88251, + 'x': 851.96124, + 'z': 257.9054 + }, + 'type': 'LEFT_EAR_TRAGION' + }, { + 'position': { + 'y': 487.9679, + 'x': 1313.8328, + 'z': 304.29816 + }, + 'type': 'RIGHT_EAR_TRAGION' + }, { + 'position': { + 'y': 447.98254, + 'x': 1114.1573, + 'z': -50.620598 + }, + 'type': 'FOREHEAD_GLABELLA' + }, { + 'position': { + 'y': 815.3302, + 'x': 1113.27, + 'z': 109.69422 + }, + 'type': 'CHIN_GNATHION' + }, { + 'position': { + 'y': 656.20123, + 'x': 884.34106, + 'z': 223.19124 + }, + 'type': 'CHIN_LEFT_GONION' + }, { + 'position': { + 'y': 639.291, + 'x': 1301.2404, + 'z': 265.00647 + }, + 'type': 'CHIN_RIGHT_GONION' + }], + 'sorrowLikelihood': 'VERY_UNLIKELY', + 'surpriseLikelihood': 'VERY_UNLIKELY', + 'tiltAngle': -18.412321, + 'angerLikelihood': 'VERY_UNLIKELY', + 'boundingPoly': { + 'vertices': [{ + 'y': 58, + 'x': 748 + }, { + 'y': 58, + 'x': 1430 + }, { + 'y': 851, + 'x': 1430 + }, { + 'y': 851, + 'x': 748 + }] + }, + 'rollAngle': -0.43419784, + 'blurredLikelihood': 'VERY_UNLIKELY', + 'fdBoundingPoly': { + 'vertices': [{ + 'y': 310, + 'x': 845 + }, { + 'y': 310, + 'x': 1379 + }, { + 'y': 844, + 'x': 1379 + }, { + 'y': 844, + 'x': 845 + }] + } + }, { + 
'headwearLikelihood': 'VERY_UNLIKELY', + 'panAngle': -12.609346, + 'underExposedLikelihood': 'VERY_UNLIKELY', + 'landmarkingConfidence': 0.56890666, + 'detectionConfidence': 0.96333671, + 'joyLikelihood': 'VERY_LIKELY', + 'landmarks': [{ + 'position': { + 'y': 604.24847, + 'x': 1655.8817, + 'z': -0.0023633335 + }, + 'type': 'LEFT_EYE' + }, { + 'position': { + 'y': 590.82428, + 'x': 1797.3677, + 'z': -30.984835 + }, + 'type': 'RIGHT_EYE' + }, { + 'position': { + 'y': 574.40173, + 'x': 1609.9617, + 'z': 14.634346 + }, + 'type': 'LEFT_OF_LEFT_EYEBROW' + }, { + 'position': { + 'y': 576.57483, + 'x': 1682.0824, + 'z': -41.733879 + }, + 'type': 'RIGHT_OF_LEFT_EYEBROW' + }, { + 'position': { + 'y': 571.701, + 'x': 1749.7633, + 'z': -56.105503 + }, + 'type': 'LEFT_OF_RIGHT_EYEBROW' + }, { + 'position': { + 'y': 556.67511, + 'x': 1837.4333, + 'z': -35.228374 + }, + 'type': 'RIGHT_OF_RIGHT_EYEBROW' + }, { + 'position': { + 'y': 600.41345, + 'x': 1720.1719, + 'z': -44.4393 + }, + 'type': 'MIDPOINT_BETWEEN_EYES' + }, { + 'position': { + 'y': 691.66907, + 'x': 1720.0095, + 'z': -63.878113 + }, + 'type': 'NOSE_TIP' + }, { + 'position': { + 'y': 731.63239, + 'x': 1733.2758, + 'z': -20.964622 + }, + 'type': 'UPPER_LIP' + }, { + 'position': { + 'y': 774.79138, + 'x': 1740.1494, + 'z': -0.038273316 + }, + 'type': 'LOWER_LIP' + }, { + 'position': { + 'y': 739.80981, + 'x': 1673.0156, + 'z': 35.655769 + }, + 'type': 'MOUTH_LEFT' + }, { + 'position': { + 'y': 728.8186, + 'x': 1808.8899, + 'z': 9.5512733 + }, + 'type': 'MOUTH_RIGHT' + }, { + 'position': { + 'y': 753.71118, + 'x': 1738.0863, + 'z': -5.2711153 + }, + 'type': 'MOUTH_CENTER' + }, { + 'position': { + 'y': 684.97522, + 'x': 1770.2415, + 'z': -18.243216 + }, + 'type': 'NOSE_BOTTOM_RIGHT' + }, { + 'position': { + 'y': 695.69922, + 'x': 1693.4669, + 'z': -0.6566487 + }, + 'type': 'NOSE_BOTTOM_LEFT' + }, { + 'position': { + 'y': 704.46063, + 'x': 1729.86, + 'z': -28.144602 + }, + 'type': 'NOSE_BOTTOM_CENTER' + }, { + 'position': 
{ + 'y': 597.93713, + 'x': 1654.082, + 'z': -11.508363 + }, + 'type': 'LEFT_EYE_TOP_BOUNDARY' + }, { + 'position': { + 'y': 605.889, + 'x': 1684.0094, + 'z': -5.0379925 + }, + 'type': 'LEFT_EYE_RIGHT_CORNER' + }, { + 'position': { + 'y': 614.40448, + 'x': 1656.4753, + 'z': 1.001922 + }, + 'type': 'LEFT_EYE_BOTTOM_BOUNDARY' + }, { + 'position': { + 'y': 604.11292, + 'x': 1632.2733, + 'z': 18.163708 + }, + 'type': 'LEFT_EYE_LEFT_CORNER' + }, { + 'position': { + 'y': 606.02026, + 'x': 1654.1372, + 'z': -3.3510325 + }, + 'type': 'LEFT_EYE_PUPIL' + }, { + 'position': { + 'y': 588.00885, + 'x': 1790.3329, + 'z': -41.150127 + }, + 'type': 'RIGHT_EYE_TOP_BOUNDARY' + }, { + 'position': { + 'y': 590.46307, + 'x': 1824.5522, + 'z': -23.20849 + }, + 'type': 'RIGHT_EYE_RIGHT_CORNER' + }, { + 'position': { + 'y': 601.75946, + 'x': 1797.9852, + 'z': -29.095766 + }, + 'type': 'RIGHT_EYE_BOTTOM_BOUNDARY' + }, { + 'position': { + 'y': 598.66449, + 'x': 1768.7595, + 'z': -23.117319 + }, + 'type': 'RIGHT_EYE_LEFT_CORNER' + }, { + 'position': { + 'y': 595.84918, + 'x': 1794.0195, + 'z': -33.897068 + }, + 'type': 'RIGHT_EYE_PUPIL' + }, { + 'position': { + 'y': 561.08679, + 'x': 1641.9266, + 'z': -26.653444 + }, + 'type': 'LEFT_EYEBROW_UPPER_MIDPOINT' + }, { + 'position': { + 'y': 550.38129, + 'x': 1789.6267, + 'z': -58.874447 + }, + 'type': 'RIGHT_EYEBROW_UPPER_MIDPOINT' + }, { + 'position': { + 'y': 632.54456, + 'x': 1611.1659, + 'z': 198.83691 + }, + 'type': 'LEFT_EAR_TRAGION' + }, { + 'position': { + 'y': 610.1615, + 'x': 1920.511, + 'z': 131.28908 + }, + 'type': 'RIGHT_EAR_TRAGION' + }, { + 'position': { + 'y': 574.28448, + 'x': 1714.6324, + 'z': -54.497036 + }, + 'type': 'FOREHEAD_GLABELLA' + }, { + 'position': { + 'y': 830.93884, + 'x': 1752.2703, + 'z': 33.332912 + }, + 'type': 'CHIN_GNATHION' + }, { + 'position': { + 'y': 732.33936, + 'x': 1626.519, + 'z': 162.6319 + }, + 'type': 'CHIN_LEFT_GONION' + }, { + 'position': { + 'y': 712.21118, + 'x': 1905.7007, + 'z': 101.86344 + }, 
+ 'type': 'CHIN_RIGHT_GONION' + }], + 'sorrowLikelihood': 'VERY_UNLIKELY', + 'surpriseLikelihood': 'VERY_UNLIKELY', + 'tiltAngle': -13.636207, + 'angerLikelihood': 'VERY_UNLIKELY', + 'boundingPoly': { + 'vertices': [{ + 'y': 319, + 'x': 1524 + }, { + 'y': 319, + 'x': 1959 + }, { + 'y': 859, + 'x': 1959 + }, { + 'y': 859, + 'x': 1524 + }] + }, + 'rollAngle': -7.1766233, + 'blurredLikelihood': 'VERY_UNLIKELY', + 'fdBoundingPoly': { + 'vertices': [{ + 'y': 485, + 'x': 1559 + }, { + 'y': 485, + 'x': 1920 + }, { + 'y': 846, + 'x': 1920 + }, { + 'y': 846, + 'x': 1559 + }] + } + }, { + 'headwearLikelihood': 'VERY_UNLIKELY', + 'panAngle': 8.7634687, + 'underExposedLikelihood': 'VERY_UNLIKELY', + 'landmarkingConfidence': 0.45491594, + 'detectionConfidence': 0.98870116, + 'joyLikelihood': 'VERY_LIKELY', + 'landmarks': [{ + 'position': { + 'y': 678.57886, + 'x': 397.22269, + 'z': 0.00052442803 + }, + 'type': 'LEFT_EYE' + }, { + 'position': { + 'y': 671.90381, + 'x': 515.38159, + 'z': 17.843918 + }, + 'type': 'RIGHT_EYE' + }, { + 'position': { + 'y': 657.13904, + 'x': 361.41068, + 'z': 6.1270714 + }, + 'type': 'LEFT_OF_LEFT_EYEBROW' + }, { + 'position': { + 'y': 649.82916, + 'x': 432.9726, + 'z': -16.12303 + }, + 'type': 'RIGHT_OF_LEFT_EYEBROW' + }, { + 'position': { + 'y': 646.04272, + 'x': 487.78485, + 'z': -7.638854 + }, + 'type': 'LEFT_OF_RIGHT_EYEBROW' + }, { + 'position': { + 'y': 642.4032, + 'x': 549.46954, + 'z': 35.154259 + }, + 'type': 'RIGHT_OF_RIGHT_EYEBROW' + }, { + 'position': { + 'y': 672.44031, + 'x': 462.86993, + 'z': -14.413016 + }, + 'type': 'MIDPOINT_BETWEEN_EYES' + }, { + 'position': { + 'y': 736.5896, + 'x': 474.0661, + 'z': -50.206612 + }, + 'type': 'NOSE_TIP' + }, { + 'position': { + 'y': 775.34973, + 'x': 472.54224, + 'z': -25.24843 + }, + 'type': 'UPPER_LIP' + }, { + 'position': { + 'y': 820.41418, + 'x': 474.41162, + 'z': -18.226196 + }, + 'type': 'LOWER_LIP' + }, { + 'position': { + 'y': 797.35547, + 'x': 415.29095, + 'z': 0.069621459 + }, + 'type': 
'MOUTH_LEFT' + }, { + 'position': { + 'y': 786.58917, + 'x': 519.26709, + 'z': 13.945135 + }, + 'type': 'MOUTH_RIGHT' + }, { + 'position': { + 'y': 798.462, + 'x': 472.48071, + 'z': -17.317541 + }, + 'type': 'MOUTH_CENTER' + }, { + 'position': { + 'y': 742.13464, + 'x': 498.90826, + 'z': -1.8338414 + }, + 'type': 'NOSE_BOTTOM_RIGHT' + }, { + 'position': { + 'y': 747.218, + 'x': 438.95078, + 'z': -11.851667 + }, + 'type': 'NOSE_BOTTOM_LEFT' + }, { + 'position': { + 'y': 754.20105, + 'x': 472.47375, + 'z': -24.760784 + }, + 'type': 'NOSE_BOTTOM_CENTER' + }, { + 'position': { + 'y': 672.1994, + 'x': 403.39957, + 'z': -6.9005938 + }, + 'type': 'LEFT_EYE_TOP_BOUNDARY' + }, { + 'position': { + 'y': 679.914, + 'x': 425.36029, + 'z': 4.3264537 + }, + 'type': 'LEFT_EYE_RIGHT_CORNER' + }, { + 'position': { + 'y': 687.11792, + 'x': 401.66464, + 'z': -0.79697126 + }, + 'type': 'LEFT_EYE_BOTTOM_BOUNDARY' + }, { + 'position': { + 'y': 682.9585, + 'x': 378.93005, + 'z': 7.3909378 + }, + 'type': 'LEFT_EYE_LEFT_CORNER' + }, { + 'position': { + 'y': 680.40326, + 'x': 401.7229, + 'z': -2.7444897 + }, + 'type': 'LEFT_EYE_PUPIL' + }, { + 'position': { + 'y': 663.39496, + 'x': 516.03217, + 'z': 10.454485 + }, + 'type': 'RIGHT_EYE_TOP_BOUNDARY' + }, { + 'position': { + 'y': 670.74463, + 'x': 536.45978, + 'z': 31.652559 + }, + 'type': 'RIGHT_EYE_RIGHT_CORNER' + }, { + 'position': { + 'y': 679.21289, + 'x': 517.50879, + 'z': 16.653259 + }, + 'type': 'RIGHT_EYE_BOTTOM_BOUNDARY' + }, { + 'position': { + 'y': 676.06976, + 'x': 495.27335, + 'z': 14.956539 + }, + 'type': 'RIGHT_EYE_LEFT_CORNER' + }, { + 'position': { + 'y': 671.41052, + 'x': 517.3429, + 'z': 15.007857 + }, + 'type': 'RIGHT_EYE_PUPIL' + }, { + 'position': { + 'y': 639.23633, + 'x': 396.8494, + 'z': -12.132922 + }, + 'type': 'LEFT_EYEBROW_UPPER_MIDPOINT' + }, { + 'position': { + 'y': 629.66724, + 'x': 518.96332, + 'z': 6.7055798 + }, + 'type': 'RIGHT_EYEBROW_UPPER_MIDPOINT' + }, { + 'position': { + 'y': 750.20837, + 'x': 
313.60855, + 'z': 127.8474 + }, + 'type': 'LEFT_EAR_TRAGION' + }, { + 'position': { + 'y': 728.68243, + 'x': 570.95, + 'z': 166.43564 + }, + 'type': 'RIGHT_EAR_TRAGION' + }, { + 'position': { + 'y': 646.05042, + 'x': 460.94397, + 'z': -16.196959 + }, + 'type': 'FOREHEAD_GLABELLA' + }, { + 'position': { + 'y': 869.36255, + 'x': 476.69009, + 'z': -4.4716644 + }, + 'type': 'CHIN_GNATHION' + }, { + 'position': { + 'y': 818.48083, + 'x': 340.65454, + 'z': 80.163544 + }, + 'type': 'CHIN_LEFT_GONION' + }, { + 'position': { + 'y': 800.17029, + 'x': 571.60297, + 'z': 115.88489 + }, + 'type': 'CHIN_RIGHT_GONION' + }], + 'sorrowLikelihood': 'VERY_UNLIKELY', + 'surpriseLikelihood': 'VERY_UNLIKELY', + 'tiltAngle': 2.1818738, + 'angerLikelihood': 'VERY_UNLIKELY', + 'boundingPoly': { + 'vertices': [{ + 'y': 481, + 'x': 257 + }, { + 'y': 481, + 'x': 636 + }, { + 'y': 922, + 'x': 636 + }, { + 'y': 922, + 'x': 257 + }] + }, + 'rollAngle': -4.8415074, + 'blurredLikelihood': 'VERY_UNLIKELY', + 'fdBoundingPoly': { + 'vertices': [{ + 'y': 597, + 'x': 315 + }, { + 'y': 597, + 'x': 593 + }, { + 'y': 874, + 'x': 593 + }, { + 'y': 874, + 'x': 315 + }] + } + }, { + 'headwearLikelihood': 'VERY_UNLIKELY', + 'panAngle': 13.486016, + 'underExposedLikelihood': 'VERY_UNLIKELY', + 'landmarkingConfidence': 0.22890881, + 'detectionConfidence': 0.91653949, + 'joyLikelihood': 'LIKELY', + 'landmarks': [{ + 'position': { + 'y': 549.30334, + 'x': 9.7225485, + 'z': 0.0014079071 + }, + 'type': 'LEFT_EYE' + }, { + 'position': { + 'y': 539.7489, + 'x': 128.87411, + 'z': 28.692257 + }, + 'type': 'RIGHT_EYE' + }, { + 'position': { + 'y': 523.62103, + 'x': -35.406662, + 'z': -0.67885911 + }, + 'type': 'LEFT_OF_LEFT_EYEBROW' + }, { + 'position': { + 'y': 519.99487, + 'x': 42.973644, + 'z': -18.105515 + }, + 'type': 'RIGHT_OF_LEFT_EYEBROW' + }, { + 'position': { + 'y': 514.23407, + 'x': 103.02193, + 'z': -4.1667485 + }, + 'type': 'LEFT_OF_RIGHT_EYEBROW' + }, { + 'position': { + 'y': 505.69614, + 'x': 165.63609, + 
'z': 47.583176 + }, + 'type': 'RIGHT_OF_RIGHT_EYEBROW' + }, { + 'position': { + 'y': 540.9787, + 'x': 76.066139, + 'z': -11.183347 + }, + 'type': 'MIDPOINT_BETWEEN_EYES' + }, { + 'position': { + 'y': 615.48669, + 'x': 89.695564, + 'z': -41.252846 + }, + 'type': 'NOSE_TIP' + }, { + 'position': { + 'y': 658.39246, + 'x': 85.935593, + 'z': -9.70177 + }, + 'type': 'UPPER_LIP' + }, { + 'position': { + 'y': 703.04309, + 'x': 87.266853, + 'z': 2.6370313 + }, + 'type': 'LOWER_LIP' + }, { + 'position': { + 'y': 678.54712, + 'x': 31.584759, + 'z': 12.874522 + }, + 'type': 'MOUTH_LEFT' + }, { + 'position': { + 'y': 670.44092, + 'x': 126.54009, + 'z': 35.510525 + }, + 'type': 'MOUTH_RIGHT' + }, { + 'position': { + 'y': 677.92883, + 'x': 85.152267, + 'z': 0.89151889 + }, + 'type': 'MOUTH_CENTER' + }, { + 'position': { + 'y': 618.41052, + 'x': 112.767, + 'z': 14.021111 + }, + 'type': 'NOSE_BOTTOM_RIGHT' + }, { + 'position': { + 'y': 624.28644, + 'x': 45.776546, + 'z': -2.0218573 + }, + 'type': 'NOSE_BOTTOM_LEFT' + }, { + 'position': { + 'y': 632.9657, + 'x': 84.253586, + 'z': -12.025499 + }, + 'type': 'NOSE_BOTTOM_CENTER' + }, { + 'position': { + 'y': 541.79987, + 'x': 11.081995, + 'z': -8.7047234 + }, + 'type': 'LEFT_EYE_TOP_BOUNDARY' + }, { + 'position': { + 'y': 549.57306, + 'x': 35.396069, + 'z': 6.4817863 + }, + 'type': 'LEFT_EYE_RIGHT_CORNER' + }, { + 'position': { + 'y': 557.55121, + 'x': 10.446005, + 'z': -0.37798333 + }, + 'type': 'LEFT_EYE_BOTTOM_BOUNDARY' + }, { + 'position': { + 'y': 551.75134, + 'x': -16.862394, + 'z': 5.4017038 + }, + 'type': 'LEFT_EYE_LEFT_CORNER' + }, { + 'position': { + 'y': 550.14355, + 'x': 8.5758247, + 'z': -3.3803346 + }, + 'type': 'LEFT_EYE_PUPIL' + }, { + 'position': { + 'y': 531.02594, + 'x': 131.48265, + 'z': 20.201307 + }, + 'type': 'RIGHT_EYE_TOP_BOUNDARY' + }, { + 'position': { + 'y': 536.71674, + 'x': 151.31306, + 'z': 45.753532 + }, + 'type': 'RIGHT_EYE_RIGHT_CORNER' + }, { + 'position': { + 'y': 547.00037, + 'x': 130.27722, + 'z': 
28.447813 + }, + 'type': 'RIGHT_EYE_BOTTOM_BOUNDARY' + }, { + 'position': { + 'y': 542.38531, + 'x': 106.59242, + 'z': 23.77187 + }, + 'type': 'RIGHT_EYE_LEFT_CORNER' + }, { + 'position': { + 'y': 539.12781, + 'x': 132.16141, + 'z': 26.180428 + }, + 'type': 'RIGHT_EYE_PUPIL' + }, { + 'position': { + 'y': 506.64093, + 'x': 4.8589344, + 'z': -18.679537 + }, + 'type': 'LEFT_EYEBROW_UPPER_MIDPOINT' + }, { + 'position': { + 'y': 494.94244, + 'x': 135.53185, + 'z': 12.703153 + }, + 'type': 'RIGHT_EYEBROW_UPPER_MIDPOINT' + }, { + 'position': { + 'y': 609.03503, + 'x': -98.89212, + 'z': 134.96341 + }, + 'type': 'LEFT_EAR_TRAGION' + }, { + 'position': { + 'y': 584.60681, + 'x': 174.55208, + 'z': 200.56409 + }, + 'type': 'RIGHT_EAR_TRAGION' + }, { + 'position': { + 'y': 514.88513, + 'x': 74.575394, + 'z': -15.91002 + }, + 'type': 'FOREHEAD_GLABELLA' + }, { + 'position': { + 'y': 755.372, + 'x': 86.603539, + 'z': 23.596317 + }, + 'type': 'CHIN_GNATHION' + }, { + 'position': { + 'y': 689.8385, + 'x': -67.949554, + 'z': 94.833694 + }, + 'type': 'CHIN_LEFT_GONION' + }, { + 'position': { + 'y': 667.89325, + 'x': 179.19363, + 'z': 154.18192 + }, + 'type': 'CHIN_RIGHT_GONION' + }], + 'sorrowLikelihood': 'VERY_UNLIKELY', + 'surpriseLikelihood': 'VERY_UNLIKELY', + 'tiltAngle': -4.1819687, + 'angerLikelihood': 'VERY_UNLIKELY', + 'boundingPoly': { + 'vertices': [{ + 'y': 322 + }, { + 'y': 322, + 'x': 252 + }, { + 'y': 800, + 'x': 252 + }, { + 'y': 800 + }] + }, + 'rollAngle': -4.1248608, + 'blurredLikelihood': 'LIKELY', + 'fdBoundingPoly': { + 'vertices': [{ + 'y': 450 + }, { + 'y': 450, + 'x': 235 + }, { + 'y': 745, + 'x': 235 + }, { + 'y': 745 + }] + } + }, { + 'headwearLikelihood': 'VERY_UNLIKELY', + 'panAngle': 4.0344138, + 'underExposedLikelihood': 'VERY_UNLIKELY', + 'landmarkingConfidence': 0.16798845, + 'detectionConfidence': 0.7605139, + 'joyLikelihood': 'VERY_LIKELY', + 'landmarks': [{ + 'position': { + 'y': 637.85211, + 'x': 676.09375, + 'z': 4.3306696e-05 + }, + 'type': 
'LEFT_EYE' + }, { + 'position': { + 'y': 637.43292, + 'x': 767.7132, + 'z': 6.4413033 + }, + 'type': 'RIGHT_EYE' + }, { + 'position': { + 'y': 614.27075, + 'x': 642.07782, + 'z': 3.731837 + }, + 'type': 'LEFT_OF_LEFT_EYEBROW' + }, { + 'position': { + 'y': 617.27216, + 'x': 700.90112, + 'z': -19.774208 + }, + 'type': 'RIGHT_OF_LEFT_EYEBROW' + }, { + 'position': { + 'y': 617.15649, + 'x': 747.60974, + 'z': -16.511871 + }, + 'type': 'LEFT_OF_RIGHT_EYEBROW' + }, { + 'position': { + 'y': 614.018, + 'x': 802.60638, + 'z': 14.954031 + }, + 'type': 'RIGHT_OF_RIGHT_EYEBROW' + }, { + 'position': { + 'y': 638.11755, + 'x': 724.42511, + 'z': -16.930967 + }, + 'type': 'MIDPOINT_BETWEEN_EYES' + }, { + 'position': { + 'y': 696.08392, + 'x': 725.82532, + 'z': -38.252609 + }, + 'type': 'NOSE_TIP' + }, { + 'position': { + 'y': 727.826, + 'x': 724.0116, + 'z': -11.615328 + }, + 'type': 'UPPER_LIP' + }, { + 'position': { + 'y': 760.22595, + 'x': 723.30157, + 'z': -0.454926 + }, + 'type': 'LOWER_LIP' + }, { + 'position': { + 'y': 738.67548, + 'x': 684.35724, + 'z': 13.192401 + }, + 'type': 'MOUTH_LEFT' + }, { + 'position': { + 'y': 738.53015, + 'x': 759.91022, + 'z': 18.485643 + }, + 'type': 'MOUTH_RIGHT' + }, { + 'position': { + 'y': 742.42737, + 'x': 723.45239, + 'z': -2.4991846 + }, + 'type': 'MOUTH_CENTER' + }, { + 'position': { + 'y': 698.4281, + 'x': 749.50385, + 'z': 1.1831931 + }, + 'type': 'NOSE_BOTTOM_RIGHT' + }, { + 'position': { + 'y': 698.48151, + 'x': 696.923, + 'z': -2.4809308 + }, + 'type': 'NOSE_BOTTOM_LEFT' + }, { + 'position': { + 'y': 708.10651, + 'x': 724.18506, + 'z': -14.418536 + }, + 'type': 'NOSE_BOTTOM_CENTER' + }, { + 'position': { + 'y': 632.12128, + 'x': 675.22388, + 'z': -7.2390652 + }, + 'type': 'LEFT_EYE_TOP_BOUNDARY' + }, { + 'position': { + 'y': 638.59021, + 'x': 694.03516, + 'z': 1.7715795 + }, + 'type': 'LEFT_EYE_RIGHT_CORNER' + }, { + 'position': { + 'y': 644.33356, + 'x': 674.92206, + 'z': -0.037067439 + }, + 'type': 'LEFT_EYE_BOTTOM_BOUNDARY' + }, 
{ + 'position': { + 'y': 637.16479, + 'x': 655.035, + 'z': 7.4372306 + }, + 'type': 'LEFT_EYE_LEFT_CORNER' + }, { + 'position': { + 'y': 638.18683, + 'x': 673.39447, + 'z': -2.4558623 + }, + 'type': 'LEFT_EYE_PUPIL' + }, { + 'position': { + 'y': 631.96063, + 'x': 771.31744, + 'z': -0.51439536 + }, + 'type': 'RIGHT_EYE_TOP_BOUNDARY' + }, { + 'position': { + 'y': 636.94287, + 'x': 789.29443, + 'z': 16.814001 + }, + 'type': 'RIGHT_EYE_RIGHT_CORNER' + }, { + 'position': { + 'y': 644.21619, + 'x': 770.13458, + 'z': 6.6525826 + }, + 'type': 'RIGHT_EYE_BOTTOM_BOUNDARY' + }, { + 'position': { + 'y': 638.75732, + 'x': 752.51831, + 'z': 5.8927159 + }, + 'type': 'RIGHT_EYE_LEFT_CORNER' + }, { + 'position': { + 'y': 638.06738, + 'x': 772.04718, + 'z': 4.350193 + }, + 'type': 'RIGHT_EYE_PUPIL' + }, { + 'position': { + 'y': 604.87769, + 'x': 671.68707, + 'z': -15.778968 + }, + 'type': 'LEFT_EYEBROW_UPPER_MIDPOINT' + }, { + 'position': { + 'y': 604.71191, + 'x': 775.98663, + 'z': -8.4828024 + }, + 'type': 'RIGHT_EYEBROW_UPPER_MIDPOINT' + }, { + 'position': { + 'y': 670.40063, + 'x': 605.07721, + 'z': 119.27386 + }, + 'type': 'LEFT_EAR_TRAGION' + }, { + 'position': { + 'y': 669.99823, + 'x': 823.42841, + 'z': 134.54482 + }, + 'type': 'RIGHT_EAR_TRAGION' + }, { + 'position': { + 'y': 616.47058, + 'x': 724.54547, + 'z': -21.861612 + }, + 'type': 'FOREHEAD_GLABELLA' + }, { + 'position': { + 'y': 801.31934, + 'x': 722.071, + 'z': 18.37034 + }, + 'type': 'CHIN_GNATHION' + }, { + 'position': { + 'y': 736.57159, + 'x': 617.91388, + 'z': 88.713562 + }, + 'type': 'CHIN_LEFT_GONION' + }, { + 'position': { + 'y': 736.21118, + 'x': 815.234, + 'z': 102.52047 + }, + 'type': 'CHIN_RIGHT_GONION' + }], + 'sorrowLikelihood': 'VERY_UNLIKELY', + 'surpriseLikelihood': 'VERY_UNLIKELY', + 'tiltAngle': -7.0173812, + 'angerLikelihood': 'VERY_UNLIKELY', + 'boundingPoly': { + 'vertices': [{ + 'y': 459, + 'x': 557 + }, { + 'y': 459, + 'x': 875 + }, { + 'y': 829, + 'x': 875 + }, { + 'y': 829, + 'x': 557 + }] 
+ }, + 'rollAngle': 0.38634345, + 'blurredLikelihood': 'LIKELY', + 'fdBoundingPoly': { + 'vertices': [{ + 'y': 570, + 'x': 612 + }, { + 'y': 570, + 'x': 837 + }, { + 'y': 795, + 'x': 837 + }, { + 'y': 795, + 'x': 612 + }] + } + }] + }] +} + + +MULTIPLE_RESPONSE = { + 'responses': [ + { + 'labelAnnotations': [ + { + 'mid': '/m/0k4j', + 'description': 'automobile', + 'score': 0.9776855 + }, + { + 'mid': '/m/07yv9', + 'description': 'vehicle', + 'score': 0.947987 + }, + { + 'mid': '/m/07r04', + 'description': 'truck', + 'score': 0.88429511 + }, + ], + }, + { + 'safeSearchAnnotation': { + 'adult': 'VERY_UNLIKELY', + 'spoof': 'UNLIKELY', + 'medical': 'POSSIBLE', + 'violence': 'VERY_UNLIKELY' + }, + }, + ], +} + + +SAFE_SEARCH_DETECTION_RESPONSE = { + 'responses': [ + { + 'safeSearchAnnotation': { + 'adult': 'VERY_UNLIKELY', + 'spoof': 'UNLIKELY', + 'medical': 'POSSIBLE', + 'violence': 'VERY_UNLIKELY' + } + } + ] +} + + +TEXT_DETECTION_RESPONSE = { + 'responses': [ + { + 'textAnnotations': [ + { + 'locale': 'en', + 'description': 'Google CloudPlatform\n', + 'boundingPoly': { + 'vertices': [ + { + 'x': 129, + 'y': 694 + }, + { + 'x': 1375, + 'y': 694 + }, + { + 'x': 1375, + 'y': 835 + }, + { + 'x': 129, + 'y': 835 + } + ] + } + }, + { + 'description': 'Google', + 'boundingPoly': { + 'vertices': [ + { + 'x': 129, + 'y': 694 + }, + { + 'x': 535, + 'y': 694 + }, + { + 'x': 535, + 'y': 835 + }, + { + 'x': 129, + 'y': 835 + } + ] + } + }, + { + 'description': 'CloudPlatform', + 'boundingPoly': { + 'vertices': [ + { + 'x': 567, + 'y': 694 + }, + { + 'x': 1375, + 'y': 694 + }, + { + 'x': 1375, + 'y': 835 + }, + { + 'x': 567, + 'y': 835 + } + ] + } + } + ] + } + ] +} + + +WEB_DETECTION_RESPONSE = { + 'responses': [ + { + 'webDetection': { + 'partialMatchingImages': [{ + 'score': 0.9216, + 'url': 'https://cloud.google.com/vision' + }, { + 'score': 0.55520177, + 'url': 'https://cloud.google.com/vision' + }], + 'fullMatchingImages': [{ + 'score': 0.09591467, + 'url': 
'https://cloud.google.com/vision' + }, { + 'score': 0.09591467, + 'url': 'https://cloud.google.com/vision' + }], + 'webEntities': [{ + 'entityId': '/m/019dvv', + 'score': 1470.4435, + 'description': 'Mount Rushmore National Memorial' + }, { + 'entityId': '/m/05_5t0l', + 'score': 0.9468027, + 'description': 'Landmark' + }], + 'pagesWithMatchingImages': [{ + 'score': 2.9996617, + 'url': 'https://cloud.google.com/vision' + }, { + 'score': 1.1980441, + 'url': 'https://cloud.google.com/vision' + }] + } + } + ] +} diff --git a/vision/tests/unit/test__gax.py b/vision/tests/unit/test__gax.py new file mode 100644 index 000000000000..b2c0ea5ab430 --- /dev/null +++ b/vision/tests/unit/test__gax.py @@ -0,0 +1,297 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import mock + + +def _make_credentials(): + import google.auth.credentials + + return mock.Mock(spec=google.auth.credentials.Credentials) + + +class TestGAXClient(unittest.TestCase): + def _get_target_class(self): + from google.cloud.vision._gax import _GAPICVisionAPI + + return _GAPICVisionAPI + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_ctor(self): + client = mock.Mock( + _credentials=_make_credentials(), + spec=['_credentials'], + ) + with mock.patch('google.cloud.vision._gax.image_annotator_client.' 
+ 'ImageAnnotatorClient'): + api = self._make_one(client) + self.assertIs(api._client, client) + + def test_gapic_credentials(self): + from google.cloud.gapic.vision.v1.image_annotator_client import ( + ImageAnnotatorClient) + + from google.cloud.vision import Client + + # Mock the GAPIC ImageAnnotatorClient, whose arguments we + # want to check. + with mock.patch.object(ImageAnnotatorClient, '__init__') as iac: + iac.return_value = None + + # Create the GAX client. + credentials = _make_credentials() + client = Client(credentials=credentials, project='foo') + self._make_one(client=client) + + # Assert that the GAPIC constructor was called once, and + # that the credentials were sent. + iac.assert_called_once() + _, _, kwargs = iac.mock_calls[0] + self.assertIs(kwargs['credentials'], credentials) + + def test_kwarg_lib_name(self): + from google.cloud.gapic.vision.v1.image_annotator_client import ( + ImageAnnotatorClient) + from google.cloud.vision import __version__ + from google.cloud.vision import Client + + # Mock the GAPIC ImageAnnotatorClient, whose arguments we + # want to check. + with mock.patch.object(ImageAnnotatorClient, '__init__') as iac: + iac.return_value = None + + # Create the GAX client. + client = Client(credentials=_make_credentials(), project='foo') + self._make_one(client=client) + + # Assert that the GAPIC constructor was called once, and + # that lib_name and lib_version were sent. 
+ iac.assert_called_once() + _, _, kwargs = iac.mock_calls[0] + self.assertEqual(kwargs['lib_name'], 'gccl') + self.assertEqual(kwargs['lib_version'], __version__) + + def test_annotation(self): + from google.cloud.vision.feature import Feature + from google.cloud.vision.feature import FeatureTypes + from google.cloud.vision.image import Image + + client = mock.Mock(spec_set=['_credentials']) + feature = Feature(FeatureTypes.LABEL_DETECTION, 5) + image_content = b'abc 1 2 3' + image = Image(client, content=image_content) + with mock.patch('google.cloud.vision._gax.image_annotator_client.' + 'ImageAnnotatorClient'): + gax_api = self._make_one(client) + + mock_response = { + 'batch_annotate_images.return_value': + mock.Mock(responses=['mock response data']), + } + + gax_api._annotator_client = mock.Mock( + spec_set=['batch_annotate_images'], **mock_response) + + with mock.patch('google.cloud.vision._gax.Annotations') as mock_anno: + images = ((image, [feature]),) + gax_api.annotate(images) + mock_anno.from_pb.assert_called_with('mock response data') + gax_api._annotator_client.batch_annotate_images.assert_called() + + def test_annotate_no_requests(self): + client = mock.Mock(spec_set=['_credentials']) + with mock.patch('google.cloud.vision._gax.image_annotator_client.' + 'ImageAnnotatorClient'): + gax_api = self._make_one(client) + + response = gax_api.annotate() + self.assertEqual(response, []) + gax_api._annotator_client.batch_annotate_images.assert_not_called() + + def test_annotate_no_results(self): + from google.cloud.vision.feature import Feature + from google.cloud.vision.feature import FeatureTypes + from google.cloud.vision.image import Image + + client = mock.Mock(spec_set=['_credentials']) + feature = Feature(FeatureTypes.LABEL_DETECTION, 5) + image_content = b'abc 1 2 3' + image = Image(client, content=image_content) + with mock.patch('google.cloud.vision._gax.image_annotator_client.' 
+ 'ImageAnnotatorClient'): + gax_api = self._make_one(client) + + mock_response = { + 'batch_annotate_images.return_value': mock.Mock(responses=[]), + } + + gax_api._annotator_client = mock.Mock( + spec_set=['batch_annotate_images'], **mock_response) + with mock.patch('google.cloud.vision._gax.Annotations'): + images = ((image, [feature]),) + response = gax_api.annotate(images) + self.assertEqual(len(response), 0) + self.assertIsInstance(response, list) + + gax_api._annotator_client.batch_annotate_images.assert_called() + + def test_annotate_multiple_results(self): + from google.cloud.proto.vision.v1 import image_annotator_pb2 + from google.cloud.vision.annotations import Annotations + from google.cloud.vision.feature import Feature + from google.cloud.vision.feature import FeatureTypes + from google.cloud.vision.image import Image + + client = mock.Mock(spec_set=['_credentials']) + feature = Feature(FeatureTypes.LABEL_DETECTION, 5) + image_content = b'abc 1 2 3' + image = Image(client, content=image_content) + with mock.patch('google.cloud.vision._gax.image_annotator_client.' 
+ 'ImageAnnotatorClient'): + gax_api = self._make_one(client) + + responses = [ + image_annotator_pb2.AnnotateImageResponse(), + image_annotator_pb2.AnnotateImageResponse(), + ] + response = image_annotator_pb2.BatchAnnotateImagesResponse( + responses=responses) + + gax_api._annotator_client = mock.Mock( + spec_set=['batch_annotate_images']) + gax_api._annotator_client.batch_annotate_images.return_value = response + images = ((image, [feature]),) + responses = gax_api.annotate(images) + + self.assertEqual(len(responses), 2) + self.assertIsInstance(responses[0], Annotations) + self.assertIsInstance(responses[1], Annotations) + gax_api._annotator_client.batch_annotate_images.assert_called() + + def test_annotate_with_pb_requests_results(self): + from google.cloud.proto.vision.v1 import image_annotator_pb2 + from google.cloud.vision.annotations import Annotations + + client = mock.Mock(spec_set=['_credentials']) + + feature_type = image_annotator_pb2.Feature.CROP_HINTS + feature = image_annotator_pb2.Feature(type=feature_type, max_results=2) + + image_content = b'abc 1 2 3' + image = image_annotator_pb2.Image(content=image_content) + + aspect_ratios = [1.3333, 1.7777] + crop_hints_params = image_annotator_pb2.CropHintsParams( + aspect_ratios=aspect_ratios) + image_context = image_annotator_pb2.ImageContext( + crop_hints_params=crop_hints_params) + request = image_annotator_pb2.AnnotateImageRequest( + image=image, features=[feature], image_context=image_context) + + with mock.patch('google.cloud.vision._gax.image_annotator_client.' 
+ 'ImageAnnotatorClient'): + gax_api = self._make_one(client) + + responses = [ + image_annotator_pb2.AnnotateImageResponse(), + image_annotator_pb2.AnnotateImageResponse(), + ] + response = image_annotator_pb2.BatchAnnotateImagesResponse( + responses=responses) + + gax_api._annotator_client = mock.Mock( + spec_set=['batch_annotate_images']) + gax_api._annotator_client.batch_annotate_images.return_value = response + responses = gax_api.annotate(requests_pb=[request]) + + self.assertEqual(len(responses), 2) + for annotation in responses: + self.assertIsInstance(annotation, Annotations) + gax_api._annotator_client.batch_annotate_images.assert_called() + + +class Test__to_gapic_feature(unittest.TestCase): + def _call_fut(self, feature): + from google.cloud.vision._gax import _to_gapic_feature + return _to_gapic_feature(feature) + + def test__to_gapic_feature(self): + from google.cloud.vision.feature import Feature + from google.cloud.vision.feature import FeatureTypes + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + feature = Feature(FeatureTypes.LABEL_DETECTION, 5) + feature_pb = self._call_fut(feature) + self.assertIsInstance(feature_pb, image_annotator_pb2.Feature) + self.assertEqual(feature_pb.type, 4) + self.assertEqual(feature_pb.max_results, 5) + + +class Test__to_gapic_image(unittest.TestCase): + def _call_fut(self, image): + from google.cloud.vision._gax import _to_gapic_image + + return _to_gapic_image(image) + + def test__to_gapic_image_content(self): + from google.cloud.vision.image import Image + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + image_content = b'abc 1 2 3' + client = object() + image = Image(client, content=image_content) + image_pb = self._call_fut(image) + self.assertIsInstance(image_pb, image_annotator_pb2.Image) + self.assertEqual(image_pb.content, image_content) + + def test__to_gapic_gcs_image_uri(self): + from google.cloud.vision.image import Image + from google.cloud.proto.vision.v1 import 
image_annotator_pb2 + + image_uri = 'gs://1234/34.jpg' + client = object() + image = Image(client, source_uri=image_uri) + image_pb = self._call_fut(image) + self.assertIsInstance(image_pb, image_annotator_pb2.Image) + self.assertEqual(image_pb.source.gcs_image_uri, image_uri) + + def test__to_gapic_image_uri(self): + from google.cloud.vision.image import Image + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + image_uri = 'http://1234/34.jpg' + client = object() + image = Image(client, source_uri=image_uri) + image_pb = self._call_fut(image) + self.assertIsInstance(image_pb, image_annotator_pb2.Image) + self.assertEqual(image_pb.source.image_uri, image_uri) + + def test__to_gapic_invalid_image_uri(self): + from google.cloud.vision.image import Image + + image_uri = 'ftp://1234/34.jpg' + client = object() + image = Image(client, source_uri=image_uri) + with self.assertRaises(ValueError): + self._call_fut(image) + + def test__to_gapic_with_empty_image(self): + image = mock.Mock( + content=None, source=None, spec=['content', 'source']) + with self.assertRaises(ValueError): + self._call_fut(image) diff --git a/vision/tests/unit/test__http.py b/vision/tests/unit/test__http.py new file mode 100644 index 000000000000..ee486e409b8a --- /dev/null +++ b/vision/tests/unit/test__http.py @@ -0,0 +1,214 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import base64 +import unittest + +import mock + + +IMAGE_CONTENT = b'/9j/4QNURXhpZgAASUkq' +PROJECT = 'PROJECT' +B64_IMAGE_CONTENT = base64.b64encode(IMAGE_CONTENT).decode('ascii') + + +class TestConnection(unittest.TestCase): + + @staticmethod + def _get_target_class(): + from google.cloud.vision._http import Connection + + return Connection + + def _make_one(self, *args, **kw): + return self._get_target_class()(*args, **kw) + + def test_default_url(self): + client = object() + conn = self._make_one(client) + self.assertEqual(conn._client, client) + + def test_extra_headers(self): + from google.cloud import _http as base_http + from google.cloud.vision import _http as MUT + + http = mock.Mock(spec=['request']) + response = mock.Mock(status=200, spec=['status']) + data = b'brent-spiner' + http.request.return_value = response, data + client = mock.Mock(_http=http, spec=['_http']) + + conn = self._make_one(client) + req_data = 'req-data-boring' + result = conn.api_request( + 'GET', '/rainbow', data=req_data, expect_json=False) + self.assertEqual(result, data) + + expected_headers = { + 'Content-Length': str(len(req_data)), + 'Accept-Encoding': 'gzip', + base_http.CLIENT_INFO_HEADER: MUT._CLIENT_INFO, + 'User-Agent': conn.USER_AGENT, + } + expected_uri = conn.build_api_url('/rainbow') + http.request.assert_called_once_with( + body=req_data, + headers=expected_headers, + method='GET', + uri=expected_uri, + ) + + +class Test_HTTPVisionAPI(unittest.TestCase): + def _get_target_class(self): + from google.cloud.vision._http import _HTTPVisionAPI + + return _HTTPVisionAPI + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_call_annotate_with_no_results(self): + from google.cloud.vision.feature import Feature + from google.cloud.vision.feature import FeatureTypes + from google.cloud.vision.image import Image + + client = mock.Mock(spec_set=['_connection']) + feature = Feature(FeatureTypes.LABEL_DETECTION, 5) + 
image_content = b'abc 1 2 3' + image = Image(client, content=image_content) + + http_api = self._make_one(client) + http_api._connection = mock.Mock(spec_set=['api_request']) + http_api._connection.api_request.return_value = {'responses': []} + images = ((image, [feature]),) + response = http_api.annotate(images) + self.assertEqual(len(response), 0) + self.assertIsInstance(response, list) + + def test_call_annotate_with_no_parameters(self): + client = mock.Mock(spec_set=['_connection']) + http_api = self._make_one(client) + http_api._connection = mock.Mock(spec_set=['api_request']) + + results = http_api.annotate() + self.assertEqual(results, []) + http_api._connection.api_request.assert_not_called() + + def test_call_annotate_with_pb_requests_results(self): + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + client = mock.Mock(spec_set=['_connection']) + + feature_type = image_annotator_pb2.Feature.CROP_HINTS + feature = image_annotator_pb2.Feature(type=feature_type, max_results=2) + + image = image_annotator_pb2.Image(content=IMAGE_CONTENT) + + aspect_ratios = [1.3333, 1.7777] + crop_hints_params = image_annotator_pb2.CropHintsParams( + aspect_ratios=aspect_ratios) + image_context = image_annotator_pb2.ImageContext( + crop_hints_params=crop_hints_params) + request = image_annotator_pb2.AnnotateImageRequest( + image=image, features=[feature], image_context=image_context) + + http_api = self._make_one(client) + http_api._connection = mock.Mock(spec_set=['api_request']) + http_api._connection.api_request.return_value = {'responses': []} + + responses = http_api.annotate(requests_pb=[request]) + + # Establish that one and exactly one api_request call was made. + self.assertEqual(http_api._connection.api_request.call_count, 1) + + # Establish that the basic keyword arguments look correct. 
+ call = http_api._connection.api_request.mock_calls[0] + self.assertEqual(call[2]['method'], 'POST') + self.assertEqual(call[2]['path'], '/images:annotate') + + # Establish that the responses look correct. + self.assertEqual(responses, []) + self.assertEqual(len(responses), 0) + + def test_call_annotate_with_more_than_one_result(self): + from google.cloud.vision.feature import Feature + from google.cloud.vision.feature import FeatureTypes + from google.cloud.vision.image import Image + from google.cloud.vision.likelihood import Likelihood + from tests.unit._fixtures import MULTIPLE_RESPONSE + + client = mock.Mock(spec_set=['_connection']) + feature = Feature(FeatureTypes.LABEL_DETECTION, 5) + image_content = b'abc 1 2 3' + image = Image(client, content=image_content) + + http_api = self._make_one(client) + http_api._connection = mock.Mock(spec_set=['api_request']) + http_api._connection.api_request.return_value = MULTIPLE_RESPONSE + images = ((image, [feature]),) + responses = http_api.annotate(images) + + self.assertEqual(len(responses), 2) + image_one = responses[0] + image_two = responses[1] + self.assertEqual(len(image_one.labels), 3) + self.assertIsInstance(image_one.safe_searches, tuple) + self.assertEqual(image_two.safe_searches.adult, + Likelihood.VERY_UNLIKELY) + self.assertEqual(len(image_two.labels), 0) + + +class TestVisionRequest(unittest.TestCase): + @staticmethod + def _get_target_function(): + from google.cloud.vision._http import _make_request + + return _make_request + + def _call_fut(self, *args, **kw): + return self._get_target_function()(*args, **kw) + + def test_call_vision_request(self): + from google.cloud.vision.feature import Feature + from google.cloud.vision.feature import FeatureTypes + from google.cloud.vision.image import Image + + client = object() + image = Image(client, content=IMAGE_CONTENT) + feature = Feature(feature_type=FeatureTypes.FACE_DETECTION, + max_results=3) + request = self._call_fut(image, feature) + 
self.assertEqual(request['image'].get('content'), B64_IMAGE_CONTENT) + features = request['features'] + self.assertEqual(len(features), 1) + feature = features[0] + self.assertEqual(feature['type'], FeatureTypes.FACE_DETECTION) + self.assertEqual(feature['maxResults'], 3) + + def test_call_vision_request_with_not_feature(self): + from google.cloud.vision.image import Image + + client = object() + image = Image(client, content=IMAGE_CONTENT) + with self.assertRaises(TypeError): + self._call_fut(image, 'nonsensefeature') + + def test_call_vision_request_with_list_bad_features(self): + from google.cloud.vision.image import Image + + client = object() + image = Image(client, content=IMAGE_CONTENT) + with self.assertRaises(TypeError): + self._call_fut(image, ['nonsensefeature']) diff --git a/vision/tests/unit/test_annotations.py b/vision/tests/unit/test_annotations.py new file mode 100644 index 000000000000..89d03def13a5 --- /dev/null +++ b/vision/tests/unit/test_annotations.py @@ -0,0 +1,207 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + + +def _make_pb_entity(): + from google.cloud.proto.vision.v1 import geometry_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 + from google.type import latlng_pb2 + + description = 'testing 1 2 3' + locale = 'US' + mid = 'm/w/45342234' + score = 0.390625 + + entity_annotation = image_annotator_pb2.EntityAnnotation( + mid=mid, + locale=locale, + description=description, + score=score, + bounding_poly=geometry_pb2.BoundingPoly( + vertices=[ + geometry_pb2.Vertex(x=1, y=2), + ], + ), + locations=[ + image_annotator_pb2.LocationInfo( + lat_lng=latlng_pb2.LatLng(latitude=1.0, longitude=2.0), + ), + ], + ) + return entity_annotation + + +class TestAnnotations(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.annotations import Annotations + + return Annotations + + def _make_one(self, *args, **kw): + return self._get_target_class()(*args, **kw) + + def test_ctor(self): + annotations = self._make_one( + faces=[True], properties=[True], labels=[True], landmarks=[True], + logos=[True], safe_searches=[True], texts=[True]) + self.assertEqual(annotations.faces, [True]) + self.assertEqual(annotations.properties, [True]) + self.assertEqual(annotations.labels, [True]) + self.assertEqual(annotations.landmarks, [True]) + self.assertEqual(annotations.logos, [True]) + self.assertEqual(annotations.safe_searches, [True]) + self.assertEqual(annotations.texts, [True]) + + def test_unsupported_http_annotation(self): + returned = { + 'responses': [ + {'someMadeUpAnnotation': None}, + ], + } + annotation = self._get_target_class().from_api_repr(returned) + self.assertIsInstance(annotation, self._get_target_class()) + + def test_from_pb(self): + from google.cloud.vision.likelihood import Likelihood + from google.cloud.vision.safe_search import SafeSearchAnnotation + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + image_response = image_annotator_pb2.AnnotateImageResponse() + annotations = 
self._make_one().from_pb(image_response) + self.assertEqual(annotations.labels, []) + self.assertEqual(annotations.logos, []) + self.assertEqual(annotations.faces, []) + self.assertEqual(annotations.landmarks, []) + self.assertEqual(annotations.texts, []) + self.assertIsNone(annotations.properties) + + self.assertIsInstance(annotations.safe_searches, SafeSearchAnnotation) + safe_search = annotations.safe_searches + unknown = Likelihood.UNKNOWN + self.assertIs(safe_search.adult, unknown) + self.assertIs(safe_search.spoof, unknown) + self.assertIs(safe_search.medical, unknown) + self.assertIs(safe_search.violence, unknown) + + +class Test__make_entity_from_pb(unittest.TestCase): + def _call_fut(self, annotations): + from google.cloud.vision.annotations import _make_entity_from_pb + + return _make_entity_from_pb(annotations) + + def test_it(self): + description = 'testing 1 2 3' + locale = 'US' + mid = 'm/w/45342234' + score = 0.390625 + entity_annotation = _make_pb_entity() + entities = self._call_fut([entity_annotation]) + self.assertEqual(len(entities), 1) + entity = entities[0] + self.assertEqual(entity.description, description) + self.assertEqual(entity.mid, mid) + self.assertEqual(entity.locale, locale) + self.assertEqual(entity.score, score) + self.assertEqual(len(entity.bounds.vertices), 1) + self.assertEqual(entity.bounds.vertices[0].x_coordinate, 1) + self.assertEqual(entity.bounds.vertices[0].y_coordinate, 2) + self.assertEqual(len(entity.locations), 1) + self.assertEqual(entity.locations[0].latitude, 1.0) + self.assertEqual(entity.locations[0].longitude, 2.0) + + +class Test__make_faces_from_pb(unittest.TestCase): + def _call_fut(self, annotations): + from google.cloud.vision.annotations import _make_faces_from_pb + + return _make_faces_from_pb(annotations) + + def test_it(self): + from google.cloud.proto.vision.v1 import image_annotator_pb2 + from google.cloud.vision.face import Face + + faces_pb = [image_annotator_pb2.FaceAnnotation()] + + faces = 
self._call_fut(faces_pb) + self.assertIsInstance(faces[0], Face) + + +class Test__make_image_properties_from_pb(unittest.TestCase): + def _call_fut(self, annotations): + from google.cloud.vision.annotations import ( + _make_image_properties_from_pb) + + return _make_image_properties_from_pb(annotations) + + def test_it(self): + from google.cloud.proto.vision.v1 import image_annotator_pb2 + from google.protobuf.wrappers_pb2 import FloatValue + from google.type.color_pb2 import Color + + alpha = FloatValue(value=1.0) + color_pb = Color(red=1.0, green=2.0, blue=3.0, alpha=alpha) + color_info_pb = image_annotator_pb2.ColorInfo(color=color_pb, + score=1.0, + pixel_fraction=1.0) + dominant_colors = image_annotator_pb2.DominantColorsAnnotation( + colors=[color_info_pb]) + + image_properties_pb = image_annotator_pb2.ImageProperties( + dominant_colors=dominant_colors) + + image_properties = self._call_fut(image_properties_pb) + self.assertEqual(image_properties.colors[0].pixel_fraction, 1.0) + self.assertEqual(image_properties.colors[0].score, 1.0) + self.assertEqual(image_properties.colors[0].color.red, 1.0) + self.assertEqual(image_properties.colors[0].color.green, 2.0) + self.assertEqual(image_properties.colors[0].color.blue, 3.0) + self.assertEqual(image_properties.colors[0].color.alpha, 1.0) + + +class Test__process_image_annotations(unittest.TestCase): + def _call_fut(self, image): + from google.cloud.vision.annotations import _process_image_annotations + + return _process_image_annotations(image) + + def test_it(self): + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + description = 'testing 1 2 3' + locale = 'US' + mid = 'm/w/45342234' + score = 0.390625 + entity_annotation = _make_pb_entity() + + image_response = image_annotator_pb2.AnnotateImageResponse( + label_annotations=[entity_annotation]) + + annotations = self._call_fut(image_response) + self.assertEqual(len(annotations['labels']), 1) + entity = annotations['labels'][0] + + 
self.assertEqual(entity.description, description) + self.assertEqual(entity.mid, mid) + self.assertEqual(entity.locale, locale) + self.assertEqual(entity.score, score) + self.assertEqual(len(entity.bounds.vertices), 1) + self.assertEqual(entity.bounds.vertices[0].x_coordinate, 1) + self.assertEqual(entity.bounds.vertices[0].y_coordinate, 2) + self.assertEqual(len(entity.locations), 1) + self.assertEqual(entity.locations[0].latitude, 1.0) + self.assertEqual(entity.locations[0].longitude, 2.0) diff --git a/vision/tests/unit/test_batch.py b/vision/tests/unit/test_batch.py new file mode 100644 index 000000000000..bda1148eca7e --- /dev/null +++ b/vision/tests/unit/test_batch.py @@ -0,0 +1,83 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import mock + +PROJECT = 'PROJECT' + + +def _make_credentials(): + import google.auth.credentials + + return mock.Mock(spec=google.auth.credentials.Credentials) + + +class TestBatch(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.batch import Batch + + return Batch + + def _make_one(self, *args, **kw): + return self._get_target_class()(*args, **kw) + + def test_ctor(self): + from google.cloud.vision.feature import Feature + from google.cloud.vision.feature import FeatureTypes + from google.cloud.vision.image import Image + + client = mock.Mock(spec=[]) + image = Image(client, source_uri='gs://images/imageone.jpg') + face_feature = Feature(FeatureTypes.FACE_DETECTION, 5) + logo_feature = Feature(FeatureTypes.LOGO_DETECTION, 3) + + batch = self._make_one(client) + batch.add_image(image, [logo_feature, face_feature]) + self.assertEqual(len(batch.images), 1) + self.assertEqual(len(batch.images[0]), 2) + self.assertIsInstance(batch.images[0][0], Image) + self.assertEqual(len(batch.images[0][1]), 2) + self.assertIsInstance(batch.images[0][1][0], Feature) + self.assertIsInstance(batch.images[0][1][1], Feature) + + def test_batch_from_client(self): + from google.cloud.vision.client import Client + from google.cloud.vision.feature import Feature + from google.cloud.vision.feature import FeatureTypes + + creds = _make_credentials() + client = Client(project=PROJECT, credentials=creds) + + image_one = client.image(source_uri='gs://images/imageone.jpg') + image_two = client.image(source_uri='gs://images/imagtwo.jpg') + face_feature = Feature(FeatureTypes.FACE_DETECTION, 5) + logo_feature = Feature(FeatureTypes.LOGO_DETECTION, 3) + + # Make mocks. + annotate = mock.Mock(return_value=True, spec=[]) + vision_api = mock.Mock(annotate=annotate, spec=['annotate']) + client._vision_api_internal = vision_api + + # Actually call the partially-mocked method. 
+ batch = client.batch() + batch.add_image(image_one, [face_feature]) + batch.add_image(image_two, [logo_feature, face_feature]) + images = batch.images + self.assertEqual(len(images), 2) + self.assertTrue(batch.detect()) + self.assertEqual(len(batch.images), 0) + client._vision_api_internal.annotate.assert_called_with(images) diff --git a/vision/tests/unit/test_client.py b/vision/tests/unit/test_client.py new file mode 100644 index 000000000000..45690e5f88c4 --- /dev/null +++ b/vision/tests/unit/test_client.py @@ -0,0 +1,633 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import base64 +import unittest + +import mock + + +IMAGE_CONTENT = b'/9j/4QNURXhpZgAASUkq' +IMAGE_SOURCE = 'gs://some/image.jpg' +PROJECT = 'PROJECT' +B64_IMAGE_CONTENT = base64.b64encode(IMAGE_CONTENT).decode('ascii') + + +def _make_credentials(): + import google.auth.credentials + + return mock.Mock(spec=google.auth.credentials.Credentials) + + +class TestClient(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.client import Client + + return Client + + def _make_one(self, *args, **kw): + return self._get_target_class()(*args, **kw) + + def test_ctor(self): + creds = _make_credentials() + client = self._make_one(project=PROJECT, credentials=creds) + self.assertEqual(client.project, PROJECT) + + def test_annotate_with_preset_api(self): + credentials = _make_credentials() + client = self._make_one(project=PROJECT, credentials=credentials) + vision_api = client._vision_api + vision_api._connection = _Connection() + + annotate = mock.Mock(return_value=mock.sentinel.annotated, spec=[]) + api = mock.Mock(annotate=annotate, spec=['annotate']) + + client._vision_api_internal = api + client._vision_api.annotate() + annotate.assert_called_once_with() + + def test_make_gax_client(self): + from google.cloud.vision._gax import _GAPICVisionAPI + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=None) + vision_api = client._vision_api + vision_api._connection = _Connection() + with mock.patch('google.cloud.vision.client._GAPICVisionAPI', + spec=True): + self.assertIsInstance(client._vision_api, _GAPICVisionAPI) + + def test_make_http_client(self): + from google.cloud.vision._http import _HTTPVisionAPI + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + self.assertIsInstance(client._vision_api, _HTTPVisionAPI) + + def test_face_annotation(self): + from google.cloud.vision.annotations import 
Annotations + from google.cloud.vision.feature import Feature, FeatureTypes + from tests.unit._fixtures import FACE_DETECTION_RESPONSE + + returned = FACE_DETECTION_RESPONSE + request = { + "requests": [ + { + "image": { + "content": B64_IMAGE_CONTENT, + }, + "features": [ + { + "maxResults": 3, + "type": "FACE_DETECTION", + }, + ], + }, + ], + } + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + connection = _Connection(returned) + vision_api._connection = connection + + features = [ + Feature(feature_type=FeatureTypes.FACE_DETECTION, max_results=3), + ] + image = client.image(content=IMAGE_CONTENT) + images = ((image, features),) + api_response = client._vision_api.annotate(images) + + self.assertEqual(len(api_response), 1) + response = api_response[0] + self.assertEqual( + request, connection._requested[0]['data']) + self.assertIsInstance(response, Annotations) + + def test_image_with_client_gcs_source(self): + from google.cloud.vision.image import Image + + credentials = _make_credentials() + client = self._make_one(project=PROJECT, credentials=credentials) + gcs_image = client.image(source_uri=IMAGE_SOURCE) + self.assertIsInstance(gcs_image, Image) + self.assertEqual(gcs_image.source, IMAGE_SOURCE) + + def test_image_with_client_raw_content(self): + from google.cloud.vision.image import Image + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + raw_image = client.image(content=IMAGE_CONTENT) + self.assertIsInstance(raw_image, Image) + self.assertEqual(raw_image.content, IMAGE_CONTENT) + + def test_image_with_client_filename(self): + from mock import mock_open + from mock import patch + from google.cloud.vision.image import Image + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + with 
patch('google.cloud.vision.image.open', + mock_open(read_data=IMAGE_CONTENT)) as m: + file_image = client.image(filename='my_image.jpg') + m.assert_called_once_with('my_image.jpg', 'rb') + self.assertIsInstance(file_image, Image) + self.assertEqual(file_image.content, IMAGE_CONTENT) + + def test_multiple_detection_from_content(self): + import copy + from google.cloud.vision.feature import Feature + from google.cloud.vision.feature import FeatureTypes + from tests.unit._fixtures import LABEL_DETECTION_RESPONSE + from tests.unit._fixtures import LOGO_DETECTION_RESPONSE + + returned = copy.deepcopy(LABEL_DETECTION_RESPONSE) + logos = copy.deepcopy(LOGO_DETECTION_RESPONSE['responses'][0]) + returned['responses'][0]['logoAnnotations'] = logos['logoAnnotations'] + + credentials = _make_credentials() + client = self._make_one(project=PROJECT, credentials=credentials, + _use_grpc=False) + vision_api = client._vision_api + connection = _Connection(returned) + vision_api._connection = connection + + limit = 2 + label_feature = Feature(FeatureTypes.LABEL_DETECTION, limit) + logo_feature = Feature(FeatureTypes.LOGO_DETECTION, limit) + features = [label_feature, logo_feature] + image = client.image(content=IMAGE_CONTENT) + detected_items = image.detect(features) + + self.assertEqual(len(detected_items), 1) + items = detected_items[0] + self.assertEqual(len(items.logos), 2) + self.assertEqual(len(items.labels), 3) + first_logo = items.logos[0] + second_logo = items.logos[1] + self.assertEqual(first_logo.description, 'Brand1') + self.assertEqual(first_logo.score, 0.63192177) + self.assertEqual(second_logo.description, 'Brand2') + self.assertEqual(second_logo.score, 0.5492993) + + first_label = items.labels[0] + second_label = items.labels[1] + third_label = items.labels[2] + self.assertEqual(first_label.description, 'automobile') + self.assertEqual(first_label.score, 0.9776855) + self.assertEqual(second_label.description, 'vehicle') + self.assertEqual(second_label.score, 
0.947987) + self.assertEqual(third_label.description, 'truck') + self.assertEqual(third_label.score, 0.88429511) + + requested = connection._requested + requests = requested[0]['data']['requests'] + image_request = requests[0] + label_request = image_request['features'][0] + logo_request = image_request['features'][1] + + self.assertEqual(B64_IMAGE_CONTENT, + image_request['image']['content']) + self.assertEqual(label_request['maxResults'], 2) + self.assertEqual(label_request['type'], 'LABEL_DETECTION') + self.assertEqual(logo_request['maxResults'], 2) + self.assertEqual(logo_request['type'], 'LOGO_DETECTION') + + def test_detect_crop_hints_from_source(self): + from google.cloud.vision.crop_hint import CropHint + from tests.unit._fixtures import CROP_HINTS_RESPONSE + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + api = client._vision_api + api._connection = _Connection(CROP_HINTS_RESPONSE) + image = client.image(source_uri=IMAGE_SOURCE) + crop_hints = image.detect_crop_hints(aspect_ratios=[1.3333], limit=3) + + self.assertEqual(len(crop_hints), 2) + self.assertIsInstance(crop_hints[0], CropHint) + image_request = api._connection._requested[0]['data']['requests'][0] + self.assertEqual( + image_request['image']['source']['gcsImageUri'], IMAGE_SOURCE) + + crop_hints = image_request['imageContext']['cropHintsParams'] + ratios = crop_hints['aspectRatios'] + self.assertAlmostEqual(ratios[0], 1.3333, 4) + self.assertEqual(image_request['features'][0]['maxResults'], 3) + + def test_face_detection_from_source(self): + from google.cloud.vision.face import Face + from tests.unit._fixtures import FACE_DETECTION_RESPONSE + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + connection = _Connection(FACE_DETECTION_RESPONSE) + vision_api._connection = connection + + image = 
client.image(source_uri=IMAGE_SOURCE) + faces = image.detect_faces(limit=3) + self.assertEqual(len(faces), 5) + for face in faces: + self.assertIsInstance(face, Face) + + image_request = connection._requested[0]['data']['requests'][0] + self.assertEqual( + IMAGE_SOURCE, image_request['image']['source']['gcs_image_uri']) + self.assertEqual(image_request['features'][0]['maxResults'], 3) + + def test_face_detection_from_content(self): + from google.cloud.vision.face import Face + from tests.unit._fixtures import FACE_DETECTION_RESPONSE + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + connection = _Connection(FACE_DETECTION_RESPONSE) + vision_api._connection = connection + + image = client.image(content=IMAGE_CONTENT) + faces = image.detect_faces(limit=5) + self.assertEqual(len(faces), 5) + for face in faces: + self.assertIsInstance(face, Face) + + image_request = connection._requested[0]['data']['requests'][0] + self.assertEqual(B64_IMAGE_CONTENT, image_request['image']['content']) + self.assertEqual(image_request['features'][0]['maxResults'], 5) + + def test_face_detection_from_content_no_results(self): + returned = { + 'responses': [{}] + } + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + connection = _Connection(returned) + vision_api._connection = connection + + image = client.image(content=IMAGE_CONTENT) + faces = image.detect_faces(limit=5) + self.assertEqual(faces, ()) + self.assertEqual(len(faces), 0) + + image_request = connection._requested[0]['data']['requests'][0] + self.assertEqual(B64_IMAGE_CONTENT, image_request['image']['content']) + self.assertEqual(image_request['features'][0]['maxResults'], 5) + + def test_detect_full_text_annotation(self): + from google.cloud.vision.text import TextAnnotation + from tests.unit._fixtures import 
FULL_TEXT_RESPONSE + + returned = FULL_TEXT_RESPONSE + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + api = client._vision_api + api._connection = _Connection(returned) + image = client.image(source_uri=IMAGE_SOURCE) + full_text = image.detect_full_text(language_hints=['en'], limit=2) + + self.assertIsInstance(full_text, TextAnnotation) + self.assertEqual(full_text.text, 'The Republic\nBy Plato') + self.assertEqual(len(full_text.pages), 1) + self.assertEqual(len(full_text.pages), 1) + page = full_text.pages[0] + self.assertEqual(page.height, 1872) + self.assertEqual(page.width, 792) + self.assertEqual(len(page.blocks), 1) + self.assertEqual(len(page.blocks[0].paragraphs), 1) + self.assertEqual(len(page.blocks[0].paragraphs[0].words), 1) + + image_request = api._connection._requested[0]['data']['requests'][0] + self.assertEqual( + image_request['image']['source']['gcsImageUri'], IMAGE_SOURCE) + self.assertEqual( + len(image_request['imageContext']['languageHints']), 1) + self.assertEqual( + image_request['imageContext']['languageHints'][0], 'en') + self.assertEqual(image_request['features'][0]['maxResults'], 2) + self.assertEqual( + image_request['features'][0]['type'], 'DOCUMENT_TEXT_DETECTION') + + def test_label_detection_from_source(self): + from google.cloud.vision.entity import EntityAnnotation + from tests.unit._fixtures import LABEL_DETECTION_RESPONSE + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + connection = _Connection(LABEL_DETECTION_RESPONSE) + vision_api._connection = connection + + image = client.image(source_uri=IMAGE_SOURCE) + labels = image.detect_labels(limit=3) + self.assertEqual(len(labels), 3) + for label in labels: + self.assertIsInstance(label, EntityAnnotation) + image_request = connection._requested[0]['data']['requests'][0] + self.assertEqual( + 
image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) + self.assertEqual(image_request['features'][0]['maxResults'], 3) + self.assertEqual(labels[0].description, 'automobile') + self.assertEqual(labels[1].description, 'vehicle') + self.assertEqual(labels[0].mid, '/m/0k4j') + self.assertEqual(labels[1].mid, '/m/07yv9') + + def test_label_detection_no_results(self): + returned = { + 'responses': [{}] + } + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + vision_api._connection = _Connection(returned) + + image = client.image(content=IMAGE_CONTENT) + labels = image.detect_labels() + self.assertEqual(labels, ()) + self.assertEqual(len(labels), 0) + + def test_landmark_detection_from_source(self): + from google.cloud.vision.entity import EntityAnnotation + from tests.unit._fixtures import LANDMARK_DETECTION_RESPONSE + + credentials = _make_credentials() + client = self._make_one(project=PROJECT, credentials=credentials, + _use_grpc=False) + vision_api = client._vision_api + connection = _Connection(LANDMARK_DETECTION_RESPONSE) + vision_api._connection = connection + + image = client.image(source_uri=IMAGE_SOURCE) + landmarks = image.detect_landmarks(limit=3) + self.assertEqual(len(landmarks), 2) + + for landmark in landmarks: + self.assertIsInstance(landmark, EntityAnnotation) + image_request = connection._requested[0]['data']['requests'][0] + self.assertEqual( + image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) + self.assertEqual(image_request['features'][0]['maxResults'], 3) + self.assertEqual(landmarks[0].locations[0].latitude, 48.861013) + self.assertEqual(landmarks[0].locations[0].longitude, 2.335818) + self.assertEqual(landmarks[0].mid, '/m/04gdr') + self.assertEqual(landmarks[1].mid, '/m/094llg') + + def test_landmark_detection_from_content(self): + from google.cloud.vision.entity import EntityAnnotation + from 
tests.unit._fixtures import LANDMARK_DETECTION_RESPONSE + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + connection = _Connection(LANDMARK_DETECTION_RESPONSE) + vision_api._connection = connection + + image = client.image(content=IMAGE_CONTENT) + landmarks = image.detect_landmarks(limit=5) + self.assertEqual(len(landmarks), 2) + for landmark in landmarks: + self.assertIsInstance(landmark, EntityAnnotation) + image_request = connection._requested[0]['data']['requests'][0] + self.assertEqual(image_request['image']['content'], B64_IMAGE_CONTENT) + self.assertEqual(image_request['features'][0]['maxResults'], 5) + + def test_landmark_detection_no_results(self): + returned = { + 'responses': [{}] + } + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + vision_api._connection = _Connection(returned) + + image = client.image(content=IMAGE_CONTENT) + landmarks = image.detect_landmarks() + self.assertEqual(landmarks, ()) + self.assertEqual(len(landmarks), 0) + + def test_logo_detection_from_source(self): + from google.cloud.vision.entity import EntityAnnotation + from tests.unit._fixtures import LOGO_DETECTION_RESPONSE + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + connection = _Connection(LOGO_DETECTION_RESPONSE) + vision_api._connection = connection + + image = client.image(source_uri=IMAGE_SOURCE) + logos = image.detect_logos(limit=3) + self.assertEqual(len(logos), 2) + for logo in logos: + self.assertIsInstance(logo, EntityAnnotation) + image_request = connection._requested[0]['data']['requests'][0] + self.assertEqual( + image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) + 
self.assertEqual(image_request['features'][0]['maxResults'], 3) + + def test_logo_detection_from_content(self): + from google.cloud.vision.entity import EntityAnnotation + from tests.unit._fixtures import LOGO_DETECTION_RESPONSE + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + connection = _Connection(LOGO_DETECTION_RESPONSE) + vision_api._connection = connection + + image = client.image(content=IMAGE_CONTENT) + logos = image.detect_logos(limit=5) + self.assertEqual(len(logos), 2) + for logo in logos: + self.assertIsInstance(logo, EntityAnnotation) + image_request = connection._requested[0]['data']['requests'][0] + self.assertEqual(image_request['image']['content'], B64_IMAGE_CONTENT) + self.assertEqual(image_request['features'][0]['maxResults'], 5) + + def test_text_detection_from_source(self): + from google.cloud.vision.entity import EntityAnnotation + from tests.unit._fixtures import TEXT_DETECTION_RESPONSE + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + connection = _Connection(TEXT_DETECTION_RESPONSE) + vision_api._connection = connection + + image = client.image(source_uri=IMAGE_SOURCE) + text = image.detect_text(limit=3) + self.assertEqual(3, len(text)) + self.assertIsInstance(text[0], EntityAnnotation) + image_request = connection._requested[0]['data']['requests'][0] + self.assertEqual( + image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) + self.assertEqual(image_request['features'][0]['maxResults'], 3) + self.assertEqual(text[0].locale, 'en') + self.assertEqual(text[0].description, 'Google CloudPlatform\n') + self.assertEqual(text[1].description, 'Google') + self.assertEqual(text[0].bounds.vertices[0].x_coordinate, 129) + self.assertEqual(text[0].bounds.vertices[0].y_coordinate, 694) + + def 
test_safe_search_detection_from_source(self): + from google.cloud.vision.likelihood import Likelihood + from google.cloud.vision.safe_search import SafeSearchAnnotation + from tests.unit._fixtures import SAFE_SEARCH_DETECTION_RESPONSE + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + connection = _Connection(SAFE_SEARCH_DETECTION_RESPONSE) + vision_api._connection = connection + + image = client.image(source_uri=IMAGE_SOURCE) + safe_search = image.detect_safe_search() + self.assertIsInstance(safe_search, SafeSearchAnnotation) + image_request = connection._requested[0]['data']['requests'][0] + self.assertEqual( + image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) + + self.assertIs(safe_search.adult, Likelihood.VERY_UNLIKELY) + self.assertIs(safe_search.spoof, Likelihood.UNLIKELY) + self.assertIs(safe_search.medical, Likelihood.POSSIBLE) + self.assertIs(safe_search.violence, Likelihood.VERY_UNLIKELY) + + def test_safe_search_no_results(self): + returned = { + 'responses': [{}] + } + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + vision_api._connection = _Connection(returned) + + image = client.image(content=IMAGE_CONTENT) + safe_search = image.detect_safe_search() + self.assertEqual(safe_search, ()) + self.assertEqual(len(safe_search), 0) + + def test_image_properties_detection_from_source(self): + from google.cloud.vision.color import ImagePropertiesAnnotation + from tests.unit._fixtures import IMAGE_PROPERTIES_RESPONSE + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + connection = _Connection(IMAGE_PROPERTIES_RESPONSE) + vision_api._connection = connection + + image = client.image(source_uri=IMAGE_SOURCE) + 
image_properties = image.detect_properties() + self.assertIsInstance(image_properties, ImagePropertiesAnnotation) + image_request = connection._requested[0]['data']['requests'][0] + self.assertEqual( + image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) + self.assertEqual(image_properties.colors[0].score, 0.42258179) + self.assertEqual( + image_properties.colors[0].pixel_fraction, 0.025376344) + self.assertEqual(image_properties.colors[0].color.red, 253) + self.assertEqual(image_properties.colors[0].color.green, 203) + self.assertEqual(image_properties.colors[0].color.blue, 65) + self.assertEqual(image_properties.colors[0].color.alpha, 0.0) + + def test_image_properties_no_results(self): + returned = { + 'responses': [{}] + } + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + vision_api = client._vision_api + vision_api._connection = _Connection(returned) + + image = client.image(content=IMAGE_CONTENT) + image_properties = image.detect_properties() + self.assertEqual(image_properties, ()) + self.assertEqual(len(image_properties), 0) + + def test_detect_web_detection(self): + from google.cloud.vision.web import WebEntity + from google.cloud.vision.web import WebImage + from google.cloud.vision.web import WebPage + from tests.unit._fixtures import WEB_DETECTION_RESPONSE + + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials, _use_grpc=False) + api = client._vision_api + api._connection = _Connection(WEB_DETECTION_RESPONSE) + image = client.image(source_uri=IMAGE_SOURCE) + web_images = image.detect_web(limit=2) + + self.assertEqual(len(web_images.partial_matching_images), 2) + self.assertEqual(len(web_images.full_matching_images), 2) + self.assertEqual(len(web_images.web_entities), 2) + self.assertEqual(len(web_images.pages_with_matching_images), 2) + + for partial_match in web_images.partial_matching_images: + 
self.assertIsInstance(partial_match, WebImage) + + for full_match in web_images.full_matching_images: + self.assertIsInstance(full_match, WebImage) + + for web_entity in web_images.web_entities: + self.assertIsInstance(web_entity, WebEntity) + + for page in web_images.pages_with_matching_images: + self.assertIsInstance(page, WebPage) + + image_request = api._connection._requested[0]['data']['requests'][0] + self.assertEqual( + image_request['image']['source']['gcs_image_uri'], IMAGE_SOURCE) + self.assertEqual(image_request['features'][0]['maxResults'], 2) + self.assertEqual( + image_request['features'][0]['type'], 'WEB_DETECTION') + + +class _Connection(object): + + def __init__(self, *responses): + self._responses = responses + self._requested = [] + + def api_request(self, **kw): + import json + + json.dumps(kw.get('data', '')) # Simulate JSON encoding. + self._requested.append(kw) + response, self._responses = self._responses[0], self._responses[1:] + return response diff --git a/vision/tests/unit/test_color.py b/vision/tests/unit/test_color.py new file mode 100644 index 000000000000..17541524dac9 --- /dev/null +++ b/vision/tests/unit/test_color.py @@ -0,0 +1,131 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + + +class TestColor(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.color import Color + + return Color + + def test_rgb_color_data(self): + colors = { + 'red': 255, + 'green': 255, + 'blue': 255, + 'alpha': 0.5, + } + color_class = self._get_target_class() + colors = color_class.from_api_repr(colors) + + self.assertIsInstance(colors.red, float) + self.assertIsInstance(colors.green, float) + self.assertIsInstance(colors.blue, float) + self.assertIsInstance(colors.alpha, float) + self.assertEqual(colors.red, 255.0) + self.assertEqual(colors.green, 255.0) + self.assertEqual(colors.blue, 255.0) + self.assertEqual(colors.alpha, 0.5) + + def test_empty_pb_rgb_color_data(self): + from google.type.color_pb2 import Color + + color_pb = Color() + color_class = self._get_target_class() + color = color_class.from_pb(color_pb) + self.assertEqual(color.red, 0.0) + self.assertEqual(color.green, 0.0) + self.assertEqual(color.blue, 0.0) + self.assertEqual(color.alpha, 0.0) + + def test_pb_rgb_color_data(self): + from google.protobuf.wrappers_pb2 import FloatValue + from google.type.color_pb2 import Color + + alpha = FloatValue(value=1.0) + color_pb = Color(red=1.0, green=2.0, blue=3.0, alpha=alpha) + color_class = self._get_target_class() + color = color_class.from_pb(color_pb) + self.assertEqual(color.red, 1.0) + self.assertEqual(color.green, 2.0) + self.assertEqual(color.blue, 3.0) + self.assertEqual(color.alpha, 1.0) + + def test_pb_rgb_color_no_alpha_data(self): + from google.protobuf.wrappers_pb2 import FloatValue + from google.type.color_pb2 import Color + + alpha = FloatValue() + color_pb = Color(red=1.0, green=2.0, blue=3.0, alpha=alpha) + color_class = self._get_target_class() + color = color_class.from_pb(color_pb) + self.assertEqual(color.red, 1.0) + self.assertEqual(color.green, 2.0) + self.assertEqual(color.blue, 3.0) + self.assertEqual(color.alpha, 0.0) + + def test_missing_rgb_values(self): + colors = {} 
+ color_class = self._get_target_class() + colors = color_class.from_api_repr(colors) + + self.assertEqual(colors.red, 0) + self.assertEqual(colors.green, 0) + self.assertEqual(colors.blue, 0) + self.assertEqual(colors.alpha, 0.0) + + +class TestImagePropertiesAnnotation(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.color import ImagePropertiesAnnotation + + return ImagePropertiesAnnotation + + def test_image_properties_annotation_from_pb(self): + from google.cloud.proto.vision.v1 import image_annotator_pb2 + from google.protobuf.wrappers_pb2 import FloatValue + from google.type.color_pb2 import Color + + alpha = FloatValue(value=1.0) + color_pb = Color(red=1.0, green=2.0, blue=3.0, alpha=alpha) + color_info_pb = image_annotator_pb2.ColorInfo( + color=color_pb, score=1.0, pixel_fraction=1.0) + dominant_colors = image_annotator_pb2.DominantColorsAnnotation( + colors=[color_info_pb]) + + image_properties_pb = image_annotator_pb2.ImageProperties( + dominant_colors=dominant_colors) + + color_info = self._get_target_class() + image_properties = color_info.from_pb(image_properties_pb) + + self.assertEqual(image_properties.colors[0].pixel_fraction, 1.0) + self.assertEqual(image_properties.colors[0].score, 1.0) + self.assertEqual(image_properties.colors[0].color.red, 1.0) + self.assertEqual(image_properties.colors[0].color.green, 2.0) + self.assertEqual(image_properties.colors[0].color.blue, 3.0) + self.assertEqual(image_properties.colors[0].color.alpha, 1.0) + + def test_empty_image_properties_annotation_from_pb(self): + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + image_properties_pb = image_annotator_pb2.ImageProperties() + + color_info = self._get_target_class() + image_properties = color_info.from_pb(image_properties_pb) + self.assertIsNone(image_properties) diff --git a/vision/tests/unit/test_crop_hint.py b/vision/tests/unit/test_crop_hint.py new file mode 100644 index 000000000000..15f10ce1eec2 --- 
/dev/null +++ b/vision/tests/unit/test_crop_hint.py @@ -0,0 +1,55 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + + +class TestCropHint(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.crop_hint import CropHint + + return CropHint + + def test_crop_hint_annotation(self): + from tests.unit._fixtures import CROP_HINTS_RESPONSE + from google.cloud.vision.geometry import Bounds + + response = CROP_HINTS_RESPONSE['responses'][0]['cropHintsAnnotation'] + crop_hints_dict = response['cropHints'][0] + crop_hints_class = self._get_target_class() + crop_hints = crop_hints_class.from_api_repr(crop_hints_dict) + + self.assertIsInstance(crop_hints.bounds, Bounds) + self.assertEqual(len(crop_hints.bounds.vertices), 4) + self.assertEqual(crop_hints.confidence, 0.5) + self.assertEqual(crop_hints.importance_fraction, 1.22) + + def test_crop_hint_annotation_pb(self): + from google.cloud.proto.vision.v1 import geometry_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + vertex = geometry_pb2.Vertex(x=1, y=2) + bounds = geometry_pb2.BoundingPoly(vertices=[vertex]) + crop_hint_pb = image_annotator_pb2.CropHint( + bounding_poly=bounds, confidence=1.23, importance_fraction=4.56) + crop_hints_class = self._get_target_class() + crop_hint = crop_hints_class.from_pb(crop_hint_pb) + + self.assertEqual(len(crop_hint.bounds.vertices), 1) + vertex = crop_hint.bounds.vertices[0] + 
self.assertEqual(vertex.x_coordinate, 1) + self.assertEqual(vertex.y_coordinate, 2) + self.assertAlmostEqual(crop_hint.confidence, 1.23, 4) + self.assertAlmostEqual(crop_hint.importance_fraction, 4.56, 4) diff --git a/vision/tests/vision/test_decorators.py b/vision/tests/unit/test_decorators.py similarity index 93% rename from vision/tests/vision/test_decorators.py rename to vision/tests/unit/test_decorators.py index 0a1a4481f432..cdcc6f778e73 100644 --- a/vision/tests/vision/test_decorators.py +++ b/vision/tests/unit/test_decorators.py @@ -52,7 +52,7 @@ class A(object): class SingleFeatureMethodTests(unittest.TestCase): @mock.patch.object(vision.ImageAnnotatorClient, 'annotate_image') def test_runs_generic_single_image(self, ai): - ai.return_value = vision.image_annotator.AnnotateImageResponse() + ai.return_value = vision.types.AnnotateImageResponse() # Make a face detection request. client = vision.ImageAnnotatorClient() @@ -61,6 +61,6 @@ def test_runs_generic_single_image(self, ai): # Assert that the single-image method was called as expected. ai.assert_called_once_with({ - 'features': [vision.enums.Feature.Type.FACE_DETECTION], + 'features': [{'type': vision.enums.Feature.Type.FACE_DETECTION}], 'image': image, }, options=None) diff --git a/vision/tests/unit/test_entity.py b/vision/tests/unit/test_entity.py new file mode 100644 index 000000000000..d5b0465d31c9 --- /dev/null +++ b/vision/tests/unit/test_entity.py @@ -0,0 +1,66 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + + +class TestEntityAnnotation(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.entity import EntityAnnotation + return EntityAnnotation + + def test_logo_annotation(self): + from tests.unit._fixtures import LOGO_DETECTION_RESPONSE + + entity_class = self._get_target_class() + logo = entity_class.from_api_repr( + LOGO_DETECTION_RESPONSE['responses'][0]['logoAnnotations'][0]) + + self.assertEqual(logo.mid, '/m/05b5c') + self.assertEqual(logo.description, 'Brand1') + self.assertEqual(logo.score, 0.63192177) + self.assertEqual(logo.bounds.vertices[0].x_coordinate, 78) + self.assertEqual(logo.bounds.vertices[0].y_coordinate, 162) + + def test_logo_pb_annotation(self): + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + description = 'testing 1 2 3' + locale = 'US' + mid = 'm/w/45342234' + score = 0.875 + entity_annotation = image_annotator_pb2.EntityAnnotation() + entity_annotation.mid = mid + entity_annotation.locale = locale + entity_annotation.description = description + entity_annotation.score = score + entity_annotation.bounding_poly.vertices.add() + entity_annotation.bounding_poly.vertices[0].x = 1 + entity_annotation.bounding_poly.vertices[0].y = 2 + entity_annotation.locations.add() + entity_annotation.locations[0].lat_lng.latitude = 1.0 + entity_annotation.locations[0].lat_lng.longitude = 2.0 + + entity_class = self._get_target_class() + entity = entity_class.from_pb(entity_annotation) + + self.assertEqual(entity.description, description) + self.assertEqual(entity.mid, mid) + self.assertEqual(entity.locale, locale) + self.assertEqual(entity.score, score) + self.assertEqual(entity.bounds.vertices[0].x_coordinate, 1) + self.assertEqual(entity.bounds.vertices[0].y_coordinate, 2) + self.assertEqual(entity.locations[0].latitude, 1.0) + self.assertEqual(entity.locations[0].longitude, 2.0) 
diff --git a/vision/tests/unit/test_face.py b/vision/tests/unit/test_face.py new file mode 100644 index 000000000000..8773a00764de --- /dev/null +++ b/vision/tests/unit/test_face.py @@ -0,0 +1,94 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + + +class TestFace(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.face import Face + + return Face + + def _make_face_pb(self, *args, **kwargs): + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + return image_annotator_pb2.FaceAnnotation(*args, **kwargs) + + def setUp(self): + from tests.unit._fixtures import FACE_DETECTION_RESPONSE + + self.face_annotations = FACE_DETECTION_RESPONSE['responses'][0] + self.face_class = self._get_target_class() + self.face = self.face_class.from_api_repr( + self.face_annotations['faceAnnotations'][0]) + + def test_face_from_pb(self): + from google.cloud.proto.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import geometry_pb2 + + position_pb = geometry_pb2.Position(x=1.0, y=2.0, z=3.0) + landmark_pb = image_annotator_pb2.FaceAnnotation.Landmark( + position=position_pb, type=5) + face_pb = self._make_face_pb(landmarks=[landmark_pb]) + + face = self._get_target_class().from_pb(face_pb) + self.assertIsInstance(face, self._get_target_class()) + + def test_face_landmarks(self): + from google.cloud.vision.face import LandmarkTypes + + 
self.assertEqual(self.face.landmarking_confidence, 0.54453093) + self.assertEqual(self.face.detection_confidence, 0.9863683) + self.assertTrue(hasattr(self.face.landmarks, 'left_eye')) + left_eye = self.face.landmarks.left_eye + self.assertEqual(left_eye.position.x_coordinate, 1004.8003) + self.assertEqual(left_eye.position.y_coordinate, 482.69385) + self.assertEqual(left_eye.position.z_coordinate, 0.0016593217) + self.assertEqual(left_eye.landmark_type, LandmarkTypes.LEFT_EYE) + + def test_facial_emotions(self): + from google.cloud.vision.face import Likelihood + + self.assertEqual(self.face.joy, Likelihood.VERY_LIKELY) + self.assertEqual(self.face.sorrow, Likelihood.VERY_UNLIKELY) + self.assertEqual(self.face.surprise, Likelihood.VERY_UNLIKELY) + self.assertEqual(self.face.anger, Likelihood.VERY_UNLIKELY) + + def test_facial_angles(self): + self.assertEqual(self.face.angles.roll, -0.43419784) + self.assertEqual(self.face.angles.pan, 6.027647) + self.assertEqual(self.face.angles.tilt, -18.412321) + + def test_face_headwear_and_blur_and_underexposed(self): + from google.cloud.vision.face import Likelihood + + very_unlikely = Likelihood.VERY_UNLIKELY + image_properties = self.face.image_properties + self.assertEqual(image_properties.blurred, very_unlikely) + self.assertEqual(image_properties.underexposed, very_unlikely) + self.assertEqual(self.face.headwear, Likelihood.VERY_UNLIKELY) + + def test_face_bounds(self): + self.assertEqual(len(self.face.bounds.vertices), 4) + vertex = self.face.bounds.vertices[0] + self.assertEqual(vertex.x_coordinate, 748) + self.assertEqual(vertex.y_coordinate, 58) + + def test_facial_skin_bounds(self): + self.assertEqual(len(self.face.fd_bounds.vertices), 4) + vertex = self.face.bounds.vertices[1] + self.assertEqual(vertex.x_coordinate, 1430) + self.assertEqual(vertex.y_coordinate, 58) diff --git a/vision/tests/unit/test_feature.py b/vision/tests/unit/test_feature.py new file mode 100644 index 000000000000..322b5c6f52ac --- /dev/null 
+++ b/vision/tests/unit/test_feature.py @@ -0,0 +1,52 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + + +class TestFeature(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.feature import Feature + + return Feature + + def _make_one(self, *args, **kw): + return self._get_target_class()(*args, **kw) + + def test_construct_feature(self): + from google.cloud.vision.feature import FeatureTypes + + feature = self._make_one(FeatureTypes.LABEL_DETECTION) + self.assertEqual(feature.max_results, 1) + self.assertEqual(feature.feature_type, 'LABEL_DETECTION') + + feature = self._make_one(FeatureTypes.FACE_DETECTION, 3) + self.assertEqual(feature.max_results, 3) + self.assertEqual(feature.feature_type, 'FACE_DETECTION') + + def test_feature_as_dict(self): + from google.cloud.vision.feature import FeatureTypes + + feature = self._make_one(FeatureTypes.FACE_DETECTION, max_results=5) + expected = { + 'type': 'FACE_DETECTION', + 'maxResults': 5 + } + self.assertEqual(feature.as_dict(), expected) + + def test_bad_feature_type(self): + with self.assertRaises(AttributeError): + self._make_one('something_not_feature_type', + max_results=5) diff --git a/vision/tests/unit/test_geometry.py b/vision/tests/unit/test_geometry.py new file mode 100644 index 000000000000..6b4e8c50e0d3 --- /dev/null +++ b/vision/tests/unit/test_geometry.py @@ -0,0 +1,31 @@ +# Copyright 2016 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + + +class TestVertex(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.geometry import Vertex + + return Vertex + + def _make_one(self, x_coordinate, y_coordinate): + return self._get_target_class()(x_coordinate, y_coordinate) + + def test_vertex_with_zeros(self): + vertex = self._make_one(0.0, 0.0) + self.assertEqual(vertex.x_coordinate, 0.0) + self.assertEqual(vertex.y_coordinate, 0.0) diff --git a/vision/tests/vision/test_helpers.py b/vision/tests/unit/test_helpers.py similarity index 87% rename from vision/tests/vision/test_helpers.py rename to vision/tests/unit/test_helpers.py index 39588dd887f4..0bdaaa1d260c 100644 --- a/vision/tests/vision/test_helpers.py +++ b/vision/tests/unit/test_helpers.py @@ -20,7 +20,7 @@ from google.auth.credentials import Credentials from google.cloud.vision_v1 import ImageAnnotatorClient -from google.cloud.vision_v1 import image_annotator +from google.cloud.vision_v1 import types @@ -32,10 +32,10 @@ def setUp(self): @mock.patch.object(ImageAnnotatorClient, 'batch_annotate_images') def test_all_features_default(self, batch_annotate): # Set up an image annotation request with no features. 
- image = image_annotator.Image(source={ + image = types.Image(source={ 'image_uri': 'http://foo.com/img.jpg', }) - request = image_annotator.AnnotateImageRequest(image=image) + request = types.AnnotateImageRequest(image=image) assert not request.features # Perform the single image request. @@ -58,15 +58,15 @@ def test_all_features_default(self, batch_annotate): @mock.patch.object(ImageAnnotatorClient, 'batch_annotate_images') def test_explicit_features(self, batch_annotate): # Set up an image annotation request with no features. - image = image_annotator.Image(source={ + image = types.Image(source={ 'image_uri': 'http://foo.com/img.jpg', }) - request = image_annotator.AnnotateImageRequest( + request = types.AnnotateImageRequest( image=image, features=[ - image_annotator.Feature(type=1), - image_annotator.Feature(type=2), - image_annotator.Feature(type=3), + types.Feature(type=1), + types.Feature(type=2), + types.Feature(type=3), ], ) diff --git a/vision/tests/unit/test_image.py b/vision/tests/unit/test_image.py new file mode 100644 index 000000000000..2435bc39ac4b --- /dev/null +++ b/vision/tests/unit/test_image.py @@ -0,0 +1,119 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import base64 +import unittest + +from google.cloud._helpers import _to_bytes +from google.cloud._helpers import _bytes_to_unicode + +IMAGE_SOURCE = 'gs://some/image.jpg' +IMAGE_CONTENT = _to_bytes('/9j/4QNURXhpZgAASUkq') +B64_IMAGE_CONTENT = _bytes_to_unicode(base64.b64encode(IMAGE_CONTENT)) +CLIENT_MOCK = {'source': ''} + + +class TestVisionImage(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.image import Image + + return Image + + def _make_one(self, *args, **kw): + return self._get_target_class()(*args, **kw) + + def test_must_set_one_source(self): + with self.assertRaises(ValueError): + self._make_one(CLIENT_MOCK) + + with self.assertRaises(ValueError): + self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT, + source_uri=IMAGE_SOURCE) + + with self.assertRaises(ValueError): + self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT, + source_uri=IMAGE_SOURCE, filename='myimage.jpg') + + image = self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT) + self.assertEqual(image.content, IMAGE_CONTENT) + + def test_image_source_type_content(self): + image = self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT) + + as_dict = { + 'content': B64_IMAGE_CONTENT, + } + + self.assertEqual(image.content, IMAGE_CONTENT) + self.assertIsNone(image.source) + self.assertEqual(image.as_dict(), as_dict) + + def test_image_source_type_google_cloud_storage(self): + image = self._make_one(CLIENT_MOCK, source_uri=IMAGE_SOURCE) + + as_dict = { + 'source': { + 'gcs_image_uri': IMAGE_SOURCE, + } + } + + self.assertEqual(IMAGE_SOURCE, image.source) + self.assertEqual(None, image.content) + self.assertEqual(image.as_dict(), as_dict) + + def test_image_source_type_image_url(self): + url = 'http://www.example.com/image.jpg' + image = self._make_one(CLIENT_MOCK, source_uri=url) + as_dict = { + 'source': { + 'image_uri': url, + }, + } + + self.assertEqual(image.source, url) + self.assertIsNone(image.content) + self.assertEqual(image.as_dict(), as_dict) + + def 
test_image_no_valid_image_data(self): + image = self._make_one(CLIENT_MOCK, source_uri='ftp://notsupported') + with self.assertRaises(ValueError): + image.as_dict() + + def test_cannot_set_both_source_and_content(self): + image = self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT) + + self.assertEqual(image.content, IMAGE_CONTENT) + with self.assertRaises(AttributeError): + image.source = IMAGE_SOURCE + + image = self._make_one(CLIENT_MOCK, source_uri=IMAGE_SOURCE) + self.assertEqual(IMAGE_SOURCE, image.source) + with self.assertRaises(AttributeError): + image.content = IMAGE_CONTENT + + def test_image_from_filename(self): + from mock import mock_open + from mock import patch + + as_dict = { + 'content': B64_IMAGE_CONTENT, + } + + with patch('google.cloud.vision.image.open', + mock_open(read_data=IMAGE_CONTENT)) as m: + image = self._make_one(CLIENT_MOCK, filename='my-image-file.jpg') + m.assert_called_once_with('my-image-file.jpg', 'rb') + self.assertEqual(image.content, IMAGE_CONTENT) + self.assertEqual(image.as_dict(), as_dict) diff --git a/vision/tests/unit/test_safe_search.py b/vision/tests/unit/test_safe_search.py new file mode 100644 index 000000000000..4d6d2882cb98 --- /dev/null +++ b/vision/tests/unit/test_safe_search.py @@ -0,0 +1,71 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + + +class TestSafeSearchAnnotation(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.safe_search import SafeSearchAnnotation + + return SafeSearchAnnotation + + def test_safe_search_annotation(self): + from google.cloud.vision.likelihood import Likelihood + from tests.unit._fixtures import SAFE_SEARCH_DETECTION_RESPONSE + + response = SAFE_SEARCH_DETECTION_RESPONSE['responses'][0] + safe_search_response = response['safeSearchAnnotation'] + + safe_search = self._get_target_class().from_api_repr( + safe_search_response) + + self.assertIs(safe_search.adult, Likelihood.VERY_UNLIKELY) + self.assertIs(safe_search.spoof, Likelihood.UNLIKELY) + self.assertIs(safe_search.medical, Likelihood.POSSIBLE) + self.assertIs(safe_search.violence, Likelihood.VERY_UNLIKELY) + + def test_pb_safe_search_annotation(self): + from google.cloud.vision.likelihood import Likelihood + from google.cloud.proto.vision.v1.image_annotator_pb2 import ( + Likelihood as LikelihoodPB) + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + possible = LikelihoodPB.Value('POSSIBLE') + possible_name = Likelihood.POSSIBLE + safe_search_annotation = image_annotator_pb2.SafeSearchAnnotation( + adult=possible, spoof=possible, medical=possible, violence=possible + ) + + safe_search = self._get_target_class().from_pb(safe_search_annotation) + + self.assertIs(safe_search.adult, possible_name) + self.assertIs(safe_search.spoof, possible_name) + self.assertIs(safe_search.medical, possible_name) + self.assertIs(safe_search.violence, possible_name) + + def test_empty_pb_safe_search_annotation(self): + from google.cloud.vision.likelihood import Likelihood + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + unknown = Likelihood.UNKNOWN + safe_search_annotation = image_annotator_pb2.SafeSearchAnnotation() + + safe_search = self._get_target_class().from_pb(safe_search_annotation) + + self.assertIs(safe_search.adult, unknown) + 
+ self.assertIs(safe_search.spoof, unknown) + self.assertIs(safe_search.medical, unknown) + self.assertIs(safe_search.violence, unknown) diff --git a/vision/tests/unit/test_text.py b/vision/tests/unit/test_text.py new file mode 100644 index 000000000000..3b5df496d299 --- /dev/null +++ b/vision/tests/unit/test_text.py @@ -0,0 +1,47 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + + +class TestTextAnnotation(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.text import TextAnnotation + return TextAnnotation + + def test_text_annotation_from_api_repr(self): + annotation = { + 'pages': [], + 'text': 'some detected text', + } + text_annotation = self._get_target_class().from_api_repr(annotation) + self.assertIsInstance(text_annotation, self._get_target_class()) + self.assertEqual(len(text_annotation.pages), 0) + self.assertEqual(text_annotation.text, annotation['text']) + + def test_text_annotation_from_pb(self): + from google.cloud.proto.vision.v1 import text_annotation_pb2 + + page = text_annotation_pb2.Page(width=8, height=11) + text = 'some detected text' + text_annotation_pb = text_annotation_pb2.TextAnnotation( + pages=[page], text=text) + + text_annotation = self._get_target_class().from_pb(text_annotation_pb) + self.assertIsInstance(text_annotation, self._get_target_class()) + self.assertEqual(len(text_annotation.pages), 1) + self.assertEqual(text_annotation.pages[0].width, 8) 
class TestWebDetection(unittest.TestCase):
    """Unit tests for ``google.cloud.vision.web.WebDetection``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.vision.web import WebDetection
        return WebDetection

    def _make_one(self, web_entities, full_matching_images,
                  partial_matching_images, pages_with_matching_images):
        klass = self._get_target_class()
        return klass(
            web_entities,
            full_matching_images,
            partial_matching_images,
            pages_with_matching_images,
        )

    def test_web_detection_ctor(self):
        # The constructor stores each positional argument verbatim.
        detection = self._make_one(1, 2, 3, 4)
        self.assertEqual(detection.web_entities, 1)
        self.assertEqual(detection.full_matching_images, 2)
        self.assertEqual(detection.partial_matching_images, 3)
        self.assertEqual(detection.pages_with_matching_images, 4)

    def test_web_detection_from_api_repr(self):
        """Each JSON list is converted to the matching helper type."""
        from google.cloud.vision.web import WebEntity
        from google.cloud.vision.web import WebImage
        from google.cloud.vision.web import WebPage

        payload = {
            'partialMatchingImages': [{
                'url': 'https://cloud.google.com/vision',
                'score': 0.92234,
            }],
            'fullMatchingImages': [{
                'url': 'https://cloud.google.com/vision',
                'score': 0.92234,
            }],
            'webEntities': [{
                'entityId': '/m/05_5t0l',
                'score': 0.9468027,
                'description': 'Landmark'
            }],
            'pagesWithMatchingImages': [{
                'url': 'https://cloud.google.com/vision',
                'score': 0.92234,
            }],
        }
        detection = self._get_target_class().from_api_repr(payload)

        # Every category should have exactly one parsed element.
        for attr in ('partial_matching_images', 'full_matching_images',
                     'web_entities', 'pages_with_matching_images'):
            self.assertEqual(len(getattr(detection, attr)), 1)

        self.assertIsInstance(detection.partial_matching_images[0], WebImage)
        self.assertIsInstance(detection.full_matching_images[0], WebImage)
        self.assertIsInstance(detection.web_entities[0], WebEntity)
        self.assertIsInstance(detection.pages_with_matching_images[0], WebPage)

    def test_web_detection_from_pb(self):
        """``from_pb`` unpacks a ``WebDetection`` protobuf message."""
        from google.cloud.proto.vision.v1 import web_detection_pb2
        from google.cloud.vision.web import WebEntity
        from google.cloud.vision.web import WebImage
        from google.cloud.vision.web import WebPage

        description = 'Some images like the image you have.'
        entity_id = '/m/019dvv'
        score = 1470.4435
        url = 'http://cloud.google.com/vision'

        entity_pb = web_detection_pb2.WebDetection.WebEntity(
            entity_id=entity_id, score=score, description=description)
        image_pb = web_detection_pb2.WebDetection.WebImage(
            url=url, score=score)
        page_pb = web_detection_pb2.WebDetection.WebPage(
            url=url, score=score)

        detection_pb = web_detection_pb2.WebDetection(
            web_entities=[entity_pb],
            full_matching_images=[image_pb],
            partial_matching_images=[image_pb],
            pages_with_matching_images=[page_pb],
        )
        detection = self._get_target_class().from_pb(detection_pb)

        self.assertEqual(len(detection.web_entities), 1)
        self.assertEqual(len(detection.full_matching_images), 1)
        self.assertEqual(len(detection.partial_matching_images), 1)
        self.assertEqual(len(detection.pages_with_matching_images), 1)
        self.assertIsInstance(detection.web_entities[0], WebEntity)
        self.assertIsInstance(detection.full_matching_images[0], WebImage)
        self.assertIsInstance(detection.partial_matching_images[0], WebImage)
        self.assertIsInstance(detection.pages_with_matching_images[0], WebPage)
class TestWebEntity(unittest.TestCase):
    """Unit tests for ``google.cloud.vision.web.WebEntity``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.vision.web import WebEntity
        return WebEntity

    def _make_one(self, entity_id, score, description):
        klass = self._get_target_class()
        return klass(entity_id, score, description)

    def test_web_entity_ctor(self):
        # Constructor arguments are stored as-is on the instance.
        entity_id = 'm/abc123'
        score = 0.13245
        description = 'This is an image from the web that matches your image.'
        entity = self._make_one(entity_id, score, description)
        self.assertEqual(entity.entity_id, entity_id)
        self.assertAlmostEqual(entity.score, score, 4)
        self.assertEqual(entity.description, description)

    def test_web_entity_from_api_repr(self):
        """``from_api_repr`` reads the camelCase JSON keys."""
        payload = {
            'entityId': '/m/019dvv',
            'score': 1470.4435,
            'description': 'Mount Rushmore National Memorial',
        }
        entity = self._get_target_class().from_api_repr(payload)

        self.assertEqual(entity.entity_id, payload['entityId'])
        self.assertAlmostEqual(entity.score, payload['score'], 4)
        self.assertEqual(entity.description, payload['description'])

    def test_web_entity_from_pb(self):
        """``from_pb`` unpacks a ``WebDetection.WebEntity`` message."""
        from google.cloud.proto.vision.v1 import web_detection_pb2

        entity_id = '/m/019dvv'
        score = 1470.4435
        description = 'Some images like the image you have.'
        entity_pb = web_detection_pb2.WebDetection.WebEntity(
            entity_id=entity_id, score=score, description=description)
        entity = self._get_target_class().from_pb(entity_pb)
        self.assertEqual(entity.entity_id, entity_id)
        self.assertAlmostEqual(entity.score, score, 4)
        self.assertEqual(entity.description, description)
class TestWebImage(unittest.TestCase):
    """Unit tests for ``google.cloud.vision.web.WebImage``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.vision.web import WebImage
        return WebImage

    def _make_one(self, url, score):
        return self._get_target_class()(url, score)

    def test_web_image_ctor(self):
        url = 'http://cloud.google.com/vision'
        score = 1234.23
        web_image = self._make_one(url, score)
        self.assertEqual(web_image.url, url)
        self.assertAlmostEqual(web_image.score, score, 4)

    def test_web_image_from_api_repr(self):
        """``from_api_repr`` builds a WebImage from a JSON-style dict."""
        web_image_dict = {
            'url': 'http://cloud.google.com/vision',
            'score': 1234.23,
        }
        web_image = self._get_target_class().from_api_repr(web_image_dict)
        self.assertEqual(web_image.url, web_image_dict['url'])
        # `places=4` added for consistency with every other float
        # comparison in this file (it was omitted here, silently using
        # the default of 7 places).
        self.assertAlmostEqual(web_image.score, web_image_dict['score'], 4)

    def test_web_image_from_pb(self):
        """``from_pb`` unpacks a ``WebDetection.WebImage`` message."""
        from google.cloud.proto.vision.v1 import web_detection_pb2

        url = 'http://cloud.google.com/vision'
        score = 1234.23
        web_image_pb = web_detection_pb2.WebDetection.WebImage(
            url=url, score=score)
        web_image = self._get_target_class().from_pb(web_image_pb)
        self.assertEqual(web_image.url, url)
        self.assertAlmostEqual(web_image.score, score, 4)
class TestWebPage(unittest.TestCase):
    """Unit tests for ``google.cloud.vision.web.WebPage``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.vision.web import WebPage
        return WebPage

    def _make_one(self, url, score):
        klass = self._get_target_class()
        return klass(url, score)

    def test_web_page_ctor(self):
        # Constructor arguments are stored as-is on the instance.
        url = 'http://cloud.google.com/vision'
        score = 1234.23
        page = self._make_one(url, score)
        self.assertEqual(page.url, url)
        self.assertAlmostEqual(page.score, score, 4)

    def test_web_page_from_api_repr(self):
        """``from_api_repr`` builds a WebPage from a JSON-style dict."""
        payload = {
            'url': 'http://cloud.google.com/vision',
            'score': 1234.23,
        }
        page = self._get_target_class().from_api_repr(payload)
        self.assertEqual(page.url, payload['url'])
        self.assertAlmostEqual(page.score, payload['score'], 4)

    def test_web_page_from_pb(self):
        """``from_pb`` unpacks a ``WebDetection.WebPage`` message."""
        from google.cloud.proto.vision.v1 import web_detection_pb2

        url = 'http://cloud.google.com/vision'
        score = 1234.23
        page_pb = web_detection_pb2.WebDetection.WebPage(
            url=url, score=score)
        page = self._get_target_class().from_pb(page_pb)
        self.assertEqual(page.url, url)
        self.assertAlmostEqual(page.score, score, 4)