This repository has been archived by the owner on Nov 29, 2023. It is now read-only.

docs: Add documentation for enums #398

Merged 2 commits on Jan 23, 2023
@@ -557,7 +557,7 @@ def sample_annotate_video():
# Done; return the response.
return response

def __enter__(self):
def __enter__(self) -> "VideoIntelligenceServiceClient":
return self

def __exit__(self, type, value, traceback):
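For context on this hunk: annotating __enter__ with a forward reference to the client class lets static type checkers infer the type of the with-target instead of falling back to Any. A minimal, hedged sketch of the pattern (the call needs credentials, and the input URI is a commonly used public sample asset, not something taken from this diff):

from google.cloud import videointelligence_v1

# Because __enter__ returns "VideoIntelligenceServiceClient", type checkers
# resolve `client` to the concrete client class, so attribute access and
# argument types inside the with-block are checked.
with videointelligence_v1.VideoIntelligenceServiceClient() as client:
    operation = client.annotate_video(
        request={
            "input_uri": "gs://cloud-samples-data/video/cat.mp4",  # illustrative
            "features": [videointelligence_v1.Feature.LABEL_DETECTION],
        }
    )
    response = operation.result(timeout=300)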
57 changes: 54 additions & 3 deletions google/cloud/videointelligence_v1/types/video_intelligence.py
@@ -74,7 +74,31 @@


class Feature(proto.Enum):
r"""Video annotation feature."""
r"""Video annotation feature.

Values:
FEATURE_UNSPECIFIED (0):
Unspecified.
LABEL_DETECTION (1):
Label detection. Detect objects, such as dog
or flower.
SHOT_CHANGE_DETECTION (2):
Shot change detection.
EXPLICIT_CONTENT_DETECTION (3):
Explicit content detection.
FACE_DETECTION (4):
Human face detection.
SPEECH_TRANSCRIPTION (6):
Speech transcription.
TEXT_DETECTION (7):
OCR text detection and tracking.
OBJECT_TRACKING (9):
Object detection and tracking.
LOGO_RECOGNITION (12):
Logo detection, tracking, and recognition.
PERSON_DETECTION (14):
Person detection.
"""
FEATURE_UNSPECIFIED = 0
LABEL_DETECTION = 1
SHOT_CHANGE_DETECTION = 2
@@ -88,15 +112,42 @@ class Feature(proto.Enum):


class LabelDetectionMode(proto.Enum):
r"""Label detection mode."""
r"""Label detection mode.

Values:
LABEL_DETECTION_MODE_UNSPECIFIED (0):
Unspecified.
SHOT_MODE (1):
Detect shot-level labels.
FRAME_MODE (2):
Detect frame-level labels.
SHOT_AND_FRAME_MODE (3):
Detect both shot-level and frame-level
labels.
"""
LABEL_DETECTION_MODE_UNSPECIFIED = 0
SHOT_MODE = 1
FRAME_MODE = 2
SHOT_AND_FRAME_MODE = 3


class Likelihood(proto.Enum):
r"""Bucketized representation of likelihood."""
r"""Bucketized representation of likelihood.

Values:
LIKELIHOOD_UNSPECIFIED (0):
Unspecified likelihood.
VERY_UNLIKELY (1):
Very unlikely.
UNLIKELY (2):
Unlikely.
POSSIBLE (3):
Possible.
LIKELY (4):
Likely.
VERY_LIKELY (5):
Very likely.
"""
LIKELIHOOD_UNSPECIFIED = 0
VERY_UNLIKELY = 1
UNLIKELY = 2
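The three enums documented in this hunk typically appear together in one round trip: Feature selects which annotators run, LabelDetectionMode tunes label granularity via the request's VideoContext, and Likelihood buckets the explicit-content results. A short sketch under those assumptions (the URI and threshold are illustrative only):

from google.cloud import videointelligence_v1 as vi

context = vi.VideoContext(
    label_detection_config=vi.LabelDetectionConfig(
        # SHOT_AND_FRAME_MODE requests both shot-level and frame-level labels.
        label_detection_mode=vi.LabelDetectionMode.SHOT_AND_FRAME_MODE,
    ),
)

with vi.VideoIntelligenceServiceClient() as client:
    operation = client.annotate_video(
        request={
            "input_uri": "gs://cloud-samples-data/video/cat.mp4",  # illustrative
            "features": [
                vi.Feature.LABEL_DETECTION,
                vi.Feature.EXPLICIT_CONTENT_DETECTION,
            ],
            "video_context": context,
        }
    )
    result = operation.result(timeout=300).annotation_results[0]

# Likelihood is bucketized and ordered, so thresholding is a plain comparison.
flagged_frames = [
    frame
    for frame in result.explicit_annotation.frames
    if frame.pornography_likelihood >= vi.Likelihood.LIKELY
]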
@@ -557,7 +557,7 @@ def sample_annotate_video():
# Done; return the response.
return response

def __enter__(self):
def __enter__(self) -> "VideoIntelligenceServiceClient":
return self

def __exit__(self, type, value, traceback):
47 changes: 44 additions & 3 deletions google/cloud/videointelligence_v1beta2/types/video_intelligence.py
@@ -52,7 +52,21 @@


class Feature(proto.Enum):
r"""Video annotation feature."""
r"""Video annotation feature.

Values:
FEATURE_UNSPECIFIED (0):
Unspecified.
LABEL_DETECTION (1):
Label detection. Detect objects, such as dog
or flower.
SHOT_CHANGE_DETECTION (2):
Shot change detection.
EXPLICIT_CONTENT_DETECTION (3):
Explicit content detection.
FACE_DETECTION (4):
Human face detection and tracking.
"""
FEATURE_UNSPECIFIED = 0
LABEL_DETECTION = 1
SHOT_CHANGE_DETECTION = 2
@@ -61,15 +75,42 @@ class Feature(proto.Enum):


class LabelDetectionMode(proto.Enum):
r"""Label detection mode."""
r"""Label detection mode.

Values:
LABEL_DETECTION_MODE_UNSPECIFIED (0):
Unspecified.
SHOT_MODE (1):
Detect shot-level labels.
FRAME_MODE (2):
Detect frame-level labels.
SHOT_AND_FRAME_MODE (3):
Detect both shot-level and frame-level
labels.
"""
LABEL_DETECTION_MODE_UNSPECIFIED = 0
SHOT_MODE = 1
FRAME_MODE = 2
SHOT_AND_FRAME_MODE = 3


class Likelihood(proto.Enum):
r"""Bucketized representation of likelihood."""
r"""Bucketized representation of likelihood.

Values:
LIKELIHOOD_UNSPECIFIED (0):
Unspecified likelihood.
VERY_UNLIKELY (1):
Very unlikely.
UNLIKELY (2):
Unlikely.
POSSIBLE (3):
Possible.
LIKELY (4):
Likely.
VERY_LIKELY (5):
Very likely.
"""
LIKELIHOOD_UNSPECIFIED = 0
VERY_UNLIKELY = 1
UNLIKELY = 2
@@ -557,7 +557,7 @@ def sample_annotate_video():
# Done; return the response.
return response

def __enter__(self):
def __enter__(self) -> "VideoIntelligenceServiceClient":
return self

def __exit__(self, type, value, traceback):
@@ -52,7 +52,21 @@


class Feature(proto.Enum):
r"""Video annotation feature."""
r"""Video annotation feature.

Values:
FEATURE_UNSPECIFIED (0):
Unspecified.
LABEL_DETECTION (1):
Label detection. Detect objects, such as dog
or flower.
SHOT_CHANGE_DETECTION (2):
Shot change detection.
EXPLICIT_CONTENT_DETECTION (3):
Explicit content detection.
SPEECH_TRANSCRIPTION (6):
Speech transcription.
"""
FEATURE_UNSPECIFIED = 0
LABEL_DETECTION = 1
SHOT_CHANGE_DETECTION = 2
@@ -61,15 +75,42 @@ class Feature(proto.Enum):


class LabelDetectionMode(proto.Enum):
r"""Label detection mode."""
r"""Label detection mode.

Values:
LABEL_DETECTION_MODE_UNSPECIFIED (0):
Unspecified.
SHOT_MODE (1):
Detect shot-level labels.
FRAME_MODE (2):
Detect frame-level labels.
SHOT_AND_FRAME_MODE (3):
Detect both shot-level and frame-level
labels.
"""
LABEL_DETECTION_MODE_UNSPECIFIED = 0
SHOT_MODE = 1
FRAME_MODE = 2
SHOT_AND_FRAME_MODE = 3


class Likelihood(proto.Enum):
r"""Bucketized representation of likelihood."""
r"""Bucketized representation of likelihood.

Values:
LIKELIHOOD_UNSPECIFIED (0):
Unspecified likelihood.
VERY_UNLIKELY (1):
Very unlikely.
UNLIKELY (2):
Unlikely.
POSSIBLE (3):
Possible.
LIKELY (4):
Likely.
VERY_LIKELY (5):
Very likely.
"""
LIKELIHOOD_UNSPECIFIED = 0
VERY_UNLIKELY = 1
UNLIKELY = 2
@@ -557,7 +557,7 @@ def sample_annotate_video():
# Done; return the response.
return response

def __enter__(self):
def __enter__(self) -> "VideoIntelligenceServiceClient":
return self

def __exit__(self, type, value, traceback):
@@ -56,7 +56,23 @@


class Feature(proto.Enum):
r"""Video annotation feature."""
r"""Video annotation feature.

Values:
FEATURE_UNSPECIFIED (0):
Unspecified.
LABEL_DETECTION (1):
Label detection. Detect objects, such as dog
or flower.
SHOT_CHANGE_DETECTION (2):
Shot change detection.
EXPLICIT_CONTENT_DETECTION (3):
Explicit content detection.
TEXT_DETECTION (7):
OCR text detection and tracking.
OBJECT_TRACKING (9):
Object detection and tracking.
"""
FEATURE_UNSPECIFIED = 0
LABEL_DETECTION = 1
SHOT_CHANGE_DETECTION = 2
@@ -66,15 +82,42 @@ class Feature(proto.Enum):


class LabelDetectionMode(proto.Enum):
r"""Label detection mode."""
r"""Label detection mode.

Values:
LABEL_DETECTION_MODE_UNSPECIFIED (0):
Unspecified.
SHOT_MODE (1):
Detect shot-level labels.
FRAME_MODE (2):
Detect frame-level labels.
SHOT_AND_FRAME_MODE (3):
Detect both shot-level and frame-level
labels.
"""
LABEL_DETECTION_MODE_UNSPECIFIED = 0
SHOT_MODE = 1
FRAME_MODE = 2
SHOT_AND_FRAME_MODE = 3


class Likelihood(proto.Enum):
r"""Bucketized representation of likelihood."""
r"""Bucketized representation of likelihood.

Values:
LIKELIHOOD_UNSPECIFIED (0):
Unspecified likelihood.
VERY_UNLIKELY (1):
Very unlikely.
UNLIKELY (2):
Unlikely.
POSSIBLE (3):
Possible.
LIKELY (4):
Likely.
VERY_LIKELY (5):
Very likely.
"""
LIKELIHOOD_UNSPECIFIED = 0
VERY_UNLIKELY = 1
UNLIKELY = 2
@@ -522,7 +522,7 @@ def request_generator():
# Done; return the response.
return response

def __enter__(self):
def __enter__(self) -> "StreamingVideoIntelligenceServiceClient":
return self

def __exit__(self, type, value, traceback):
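The same context-manager change also lands on the streaming client. A rough, hedged sketch of how that client is typically driven: the request shapes follow the v1p3beta1 streaming types (StreamingAnnotateVideoRequest, StreamingVideoConfig, StreamingFeature), while the file name and chunk size are assumptions of this example, not taken from the diff:

from google.cloud import videointelligence_v1p3beta1 as vi

def request_stream(path):
    # The first request carries only the configuration; subsequent requests
    # carry chunks of raw video bytes.
    yield vi.StreamingAnnotateVideoRequest(
        video_config=vi.StreamingVideoConfig(
            feature=vi.StreamingFeature.STREAMING_LABEL_DETECTION,
        ),
    )
    with open(path, "rb") as f:
        while chunk := f.read(256 * 1024):
            yield vi.StreamingAnnotateVideoRequest(input_content=chunk)

# __enter__ now advertises StreamingVideoIntelligenceServiceClient, so `client`
# is typed as the streaming client rather than Any.
with vi.StreamingVideoIntelligenceServiceClient() as client:
    responses = client.streaming_annotate_video(requests=request_stream("sample.mp4"))
    for response in responses:
        for label in response.annotation_results.label_annotations:
            print(label.entity.description)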
@@ -557,7 +557,7 @@ def sample_annotate_video():
# Done; return the response.
return response

def __enter__(self):
def __enter__(self) -> "VideoIntelligenceServiceClient":
return self

def __exit__(self, type, value, traceback):