
Re-generate library using /synth.py #110

Merged
merged 1 commit on Oct 10, 2018
@@ -352,6 +352,13 @@ message NormalizedBoundingPoly {
 message TextSegment {
   // Video segment where a text snippet was detected.
   VideoSegment segment = 1;
+
+  // Confidence for the track of detected text. It is calculated as the highest
+  // over all frames where OCR detected text appears.
+  float confidence = 2;
+
+  // Information related to the frames where OCR detected text appears.
+  repeated TextFrame frames = 3;
 }
 
 // Video frame level annotation results for text annotation (OCR).
@@ -372,15 +379,8 @@ message TextAnnotation {
   // The detected text.
   string text = 1;
 
-  // Confidence for the track of detected text. It is calculated as the highest
-  // over all frames where OCR detected text appears.
-  float confidence = 2;
-
-  // Information related to the frames where OCR detected text appears.
-  repeated TextFrame frames = 3;
-
   // All video segments where OCR detected text appears.
-  repeated TextSegment segments = 4;
+  repeated TextSegment segments = 2;
 }
 
 // Video frame level annotations for object detection and tracking. This field
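
The net effect of the proto change: confidence and frames move from the top-level TextAnnotation message onto each TextSegment, and TextAnnotation.segments takes field number 2. A minimal JavaScript sketch of what that means for code reading an annotation result follows; the annotation literal below is a hypothetical response fragment shaped by the updated proto above, not a captured API response.

// Hypothetical TextAnnotation object shaped by the updated proto above;
// an illustrative literal, not a real API response.
const annotation = {
  text: 'STOP',
  segments: [
    {
      segment: {startTimeOffset: {seconds: 1}, endTimeOffset: {seconds: 3}},
      confidence: 0.97, // per-track confidence now lives here (field 2)
      frames: [],       // TextFrame messages for this track (field 3)
    },
  ],
};

// Before this change the fields were read from the annotation itself
// (annotation.confidence, annotation.frames); now they are read per segment.
for (const segment of annotation.segments) {
  console.log(`"${annotation.text}" confidence: ${segment.confidence}`);
  console.log(`frames in this track: ${segment.frames.length}`);
}

The JSDoc hunks that follow mirror the same move in the generated reference docs.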
@@ -550,6 +550,15 @@ const NormalizedBoundingPoly = {
  *
  * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p2beta1.VideoSegment}
  *
+ * @property {number} confidence
+ * Confidence for the track of detected text. It is calculated as the highest
+ * over all frames where OCR detected text appears.
+ *
+ * @property {Object[]} frames
+ * Information related to the frames where OCR detected text appears.
+ *
+ * This object should have the same structure as [TextFrame]{@link google.cloud.videointelligence.v1p2beta1.TextFrame}
+ *
  * @typedef TextSegment
  * @memberof google.cloud.videointelligence.v1p2beta1
  * @see [google.cloud.videointelligence.v1p2beta1.TextSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
@@ -589,15 +598,6 @@ const TextFrame = {
  * @property {string} text
  * The detected text.
  *
- * @property {number} confidence
- * Confidence for the track of detected text. It is calculated as the highest
- * over all frames where OCR detected text appears.
- *
- * @property {Object[]} frames
- * Information related to the frames where OCR detected text appears.
- *
- * This object should have the same structure as [TextFrame]{@link google.cloud.videointelligence.v1p2beta1.TextFrame}
- *
  * @property {Object[]} segments
  * All video segments where OCR detected text appears.
  *
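
For orientation, here is a hedged end-to-end sketch of how the regenerated v1p2beta1 surface is typically consumed from Node.js. The package name, client constructor, and TEXT_DETECTION request below follow the standard @google-cloud/video-intelligence usage pattern and are assumptions for illustration, not part of this PR; the input URI is a placeholder.

// Sketch only: assumes @google-cloud/video-intelligence is installed and that
// gs://my-bucket/my-video.mp4 is a readable placeholder input.
const videoIntelligence = require('@google-cloud/video-intelligence');

async function detectText() {
  const client = new videoIntelligence.v1p2beta1.VideoIntelligenceServiceClient();

  // Start the long-running annotation operation and wait for it to finish.
  const [operation] = await client.annotateVideo({
    inputUri: 'gs://my-bucket/my-video.mp4',
    features: ['TEXT_DETECTION'],
  });
  const [result] = await operation.promise();

  for (const annotation of result.annotationResults[0].textAnnotations) {
    for (const segment of annotation.segments) {
      // After this regeneration, confidence and frames are per segment.
      console.log(annotation.text, segment.confidence, segment.frames.length);
    }
  }
}

detectText().catch(console.error);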