From ab9000ca57f20ba8b2d1506677c0e44358bb5d4e Mon Sep 17 00:00:00 2001
From: Cameron Zahedi
Date: Fri, 14 Feb 2020 15:12:05 -0700
Subject: [PATCH 1/7] video: adding face detection and person detection
 samples for beta

---
 .../analyze/video_detect_faces_beta.py        | 85 ++++++++++++++++
 .../analyze/video_detect_faces_beta_test.py   | 30 ++++++
 .../analyze/video_detect_faces_gcs_beta.py    | 81 +++++++++++++++
 .../video_detect_faces_gcs_beta_test.py       | 30 ++++++
 .../analyze/video_detect_person_beta.py       | 99 +++++++++++++++++++
 .../analyze/video_detect_person_beta_test.py  | 32 ++++++
 .../analyze/video_detect_person_gcs_beta.py   | 95 ++++++++++++++++++
 .../video_detect_person_gcs_beta_test.py      | 32 ++++++
 8 files changed, 484 insertions(+)
 create mode 100644 video/cloud-client/analyze/video_detect_faces_beta.py
 create mode 100644 video/cloud-client/analyze/video_detect_faces_beta_test.py
 create mode 100644 video/cloud-client/analyze/video_detect_faces_gcs_beta.py
 create mode 100644 video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py
 create mode 100644 video/cloud-client/analyze/video_detect_person_beta.py
 create mode 100644 video/cloud-client/analyze/video_detect_person_beta_test.py
 create mode 100644 video/cloud-client/analyze/video_detect_person_gcs_beta.py
 create mode 100644 video/cloud-client/analyze/video_detect_person_gcs_beta_test.py

diff --git a/video/cloud-client/analyze/video_detect_faces_beta.py b/video/cloud-client/analyze/video_detect_faces_beta.py
new file mode 100644
index 000000000000..987de286ffcc
--- /dev/null
+++ b/video/cloud-client/analyze/video_detect_faces_beta.py
@@ -0,0 +1,85 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START video_detect_faces_gcs_beta]
+import io
+from google.cloud import videointelligence_v1p3beta1 as videointelligence
+
+
+def detect_face(local_file_path="path/to/your/video-file.mp4"):
+    """Detects faces in a video from a local file."""
+
+    client = videointelligence.VideoIntelligenceServiceClient()
+
+    with io.open(local_file_path, "rb") as f:
+        input_content = f.read()
+
+    # Configure the request
+    config = videointelligence.types.FaceDetectionConfig(
+        include_bounding_boxes=True, include_attributes=True
+    )
+    context = videointelligence.types.VideoContext(
+        face_detection_config=config
+    )
+
+    # Start the asynchronous request
+    operation = client.annotate_video(
+        input_content=input_content,
+        features=[videointelligence.enums.Feature.FACE_DETECTION],
+        video_context=context,
+    )
+
+    print("\nProcessing video for face detection annotations.")
+    result = operation.result(timeout=300)
+
+    print("\nFinished processing.\n")
+
+    # Retrieve the first result, because a single video was processed.
+    annotation_result = result.annotation_results[0]
+
+    for annotation in annotation_result.face_detection_annotations:
+        print("Face detected:")
+        for track in annotation.tracks:
+            print(
+                "Segment: {}s to {}s".format(
+                    track.segment.start_time_offset.seconds
+                    + track.segment.start_time_offset.nanos / 1e9,
+                    track.segment.end_time_offset.seconds
+                    + track.segment.end_time_offset.nanos / 1e9,
+                )
+            )
+
+            # Each segment includes timestamped objects that include
+            # characteristics of the face detected.
+            # Grab the first timestamped object
+            timestamped_object = track.timestamped_objects[0]
+            box = timestamped_object.normalized_bounding_box
+            print("\tBounding box:")
+            print("\t\tleft  : {}".format(box.left))
+            print("\t\ttop   : {}".format(box.top))
+            print("\t\tright : {}".format(box.right))
+            print("\t\tbottom: {}".format(box.bottom))
+
+            # Attributes include glasses, headwear, facial hair, smiling,
+            # direction of gaze, etc.
+            print("\tAttributes:")
+            for attribute in timestamped_object.attributes:
+                print(
+                    "\t\t{}:{} {}".format(
+                        attribute.name, attribute.value, attribute.confidence
+                    )
+                )
+
+
+# [END video_detect_faces_gcs_beta]
diff --git a/video/cloud-client/analyze/video_detect_faces_beta_test.py b/video/cloud-client/analyze/video_detect_faces_beta_test.py
new file mode 100644
index 000000000000..7fac2db58d57
--- /dev/null
+++ b/video/cloud-client/analyze/video_detect_faces_beta_test.py
@@ -0,0 +1,30 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import video_detect_faces_beta
+
+RESOURCES = os.path.join(os.path.dirname(__file__), "resources")
+
+
+def test_detect_person(capsys):
+    local_file_path = os.path.join(RESOURCES, "googlework_short.mp4")
+
+    video_detect_faces_beta.detect_face(local_file_path=local_file_path)
+
+    out, _ = capsys.readouterr()
+
+    assert "Face detected:" in out
+    assert "Attributes:" in out
diff --git a/video/cloud-client/analyze/video_detect_faces_gcs_beta.py b/video/cloud-client/analyze/video_detect_faces_gcs_beta.py
new file mode 100644
index 000000000000..c5beb6af0de3
--- /dev/null
+++ b/video/cloud-client/analyze/video_detect_faces_gcs_beta.py
@@ -0,0 +1,81 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START video_detect_faces_gcs_beta]
+from google.cloud import videointelligence_v1p3beta1 as videointelligence
+
+
+def detect_face(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
+    """Detects faces in a video."""
+
+    client = videointelligence.VideoIntelligenceServiceClient()
+
+    # Configure the request
+    config = videointelligence.types.FaceDetectionConfig(
+        include_bounding_boxes=True, include_attributes=True
+    )
+    context = videointelligence.types.VideoContext(
+        face_detection_config=config
+    )
+
+    # Start the asynchronous request
+    operation = client.annotate_video(
+        input_uri=gcs_uri,
+        features=[videointelligence.enums.Feature.FACE_DETECTION],
+        video_context=context,
+    )
+
+    print("\nProcessing video for face detection annotations.")
+    result = operation.result(timeout=300)
+
+    print("\nFinished processing.\n")
+
+    # Retrieve the first result, because a single video was processed.
+    annotation_result = result.annotation_results[0]
+
+    for annotation in annotation_result.face_detection_annotations:
+        print("Face detected:")
+        for track in annotation.tracks:
+            print(
+                "Segment: {}s to {}s".format(
+                    track.segment.start_time_offset.seconds
+                    + track.segment.start_time_offset.nanos / 1e9,
+                    track.segment.end_time_offset.seconds
+                    + track.segment.end_time_offset.nanos / 1e9,
+                )
+            )
+
+            # Each segment includes timestamped objects that include
+            # characteristics of the face detected.
+            # Grab the first timestamped object
+            timestamped_object = track.timestamped_objects[0]
+            box = timestamped_object.normalized_bounding_box
+            print("\tBounding box:")
+            print("\t\tleft  : {}".format(box.left))
+            print("\t\ttop   : {}".format(box.top))
+            print("\t\tright : {}".format(box.right))
+            print("\t\tbottom: {}".format(box.bottom))
+
+            # Attributes include glasses, headwear, facial hair, smiling,
+            # direction of gaze, etc.
+            print("\tAttributes:")
+            for attribute in timestamped_object.attributes:
+                print(
+                    "\t\t{}:{} {}".format(
+                        attribute.name, attribute.value, attribute.confidence
+                    )
+                )
+
+
+# [END video_detect_faces_gcs_beta]
diff --git a/video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py b/video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py
new file mode 100644
index 000000000000..118572383f00
--- /dev/null
+++ b/video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py
@@ -0,0 +1,30 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import video_detect_faces_gcs_beta
+
+RESOURCES = os.path.join(os.path.dirname(__file__), "resources")
+
+
+def test_detect_person(capsys):
+    input_uri = "gs://cloud-samples-data/video/googlework_short.mp4"
+
+    video_detect_faces_gcs_beta.detect_face(gcs_uri=input_uri)
+
+    out, _ = capsys.readouterr()
+
+    assert "Face detected:" in out
+    assert "Attributes:" in out
diff --git a/video/cloud-client/analyze/video_detect_person_beta.py b/video/cloud-client/analyze/video_detect_person_beta.py
new file mode 100644
index 000000000000..5d469a13902d
--- /dev/null
+++ b/video/cloud-client/analyze/video_detect_person_beta.py
@@ -0,0 +1,99 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START video_detect_person_beta]
+import io
+from google.cloud import videointelligence_v1p3beta1 as videointelligence
+
+
+def detect_person(local_file_path="path/to/your/video-file.mp4"):
+    """Detects people in a video from a local file."""
+
+    client = videointelligence.VideoIntelligenceServiceClient()
+
+    with io.open(local_file_path, "rb") as f:
+        input_content = f.read()
+
+    # Configure the request
+    config = videointelligence.types.PersonDetectionConfig(
+        include_bounding_boxes=True,
+        include_attributes=True,
+        include_pose_landmarks=True,
+    )
+    context = videointelligence.types.VideoContext(
+        person_detection_config=config
+    )
+
+    # Start the asynchronous request
+    operation = client.annotate_video(
+        input_content=input_content,
+        features=[videointelligence.enums.Feature.PERSON_DETECTION],
+        video_context=context,
+    )
+
+    print("\nProcessing video for person detection annotations.")
+    result = operation.result(timeout=300)
+
+    print("\nFinished processing.\n")
+
+    # Retrieve the first result, because a single video was processed.
+    annotation_result = result.annotation_results[0]
+
+    for annotation in annotation_result.person_detection_annotations:
+        print("Person detected:")
+        for track in annotation.tracks:
+            print(
+                "Segment: {}s to {}s".format(
+                    track.segment.start_time_offset.seconds
+                    + track.segment.start_time_offset.nanos / 1e9,
+                    track.segment.end_time_offset.seconds
+                    + track.segment.end_time_offset.nanos / 1e9,
+                )
+            )
+
+            # Each segment includes timestamped objects that include
+            # characteristics, e.g. clothes and posture, of the person detected.
+            # Grab the first timestamped object
+            timestamped_object = track.timestamped_objects[0]
+            box = timestamped_object.normalized_bounding_box
+            print("\tBounding box:")
+            print("\t\tleft  : {}".format(box.left))
+            print("\t\ttop   : {}".format(box.top))
+            print("\t\tright : {}".format(box.right))
+            print("\t\tbottom: {}".format(box.bottom))
+
+            # Attributes include unique pieces of clothing,
+            # poses, or hair color.
+            print("\tAttributes:")
+            for attribute in timestamped_object.attributes:
+                print(
+                    "\t\t{}:{} {}".format(
+                        attribute.name, attribute.value, attribute.confidence
+                    )
+                )
+
+            # Landmarks in person detection include body parts
+            print("\tLandmarks:")
+            for landmark in timestamped_object.landmarks:
+                print(
+                    "\t\t{}: {} (x={}, y={})".format(
+                        landmark.name,
+                        landmark.confidence,
+                        landmark.point.x,
+                        landmark.point.y,
+                    )
+                )
+
+
+# [END video_detect_person_beta]
diff --git a/video/cloud-client/analyze/video_detect_person_beta_test.py b/video/cloud-client/analyze/video_detect_person_beta_test.py
new file mode 100644
index 000000000000..567fed336a32
--- /dev/null
+++ b/video/cloud-client/analyze/video_detect_person_beta_test.py
@@ -0,0 +1,32 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import video_detect_person_beta
+
+RESOURCES = os.path.join(os.path.dirname(__file__), "resources")
+
+
+def test_detect_person(capsys):
+    local_file_path = os.path.join(RESOURCES, "googlework_tiny.mp4")
+
+    video_detect_person_beta.detect_person(local_file_path=local_file_path)
+
+    out, _ = capsys.readouterr()
+
+    assert "Person detected:" in out
+    assert "Attributes:" in out
+    assert "x=" in out
+    assert "y=" in out
diff --git a/video/cloud-client/analyze/video_detect_person_gcs_beta.py b/video/cloud-client/analyze/video_detect_person_gcs_beta.py
new file mode 100644
index 000000000000..ea2e181c6ed7
--- /dev/null
+++ b/video/cloud-client/analyze/video_detect_person_gcs_beta.py
@@ -0,0 +1,95 @@
+#
+# Copyright 2020 Google LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START video_detect_person_gcs_beta]
+from google.cloud import videointelligence_v1p3beta1 as videointelligence
+
+
+def detect_person(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
+    """Detects people in a video."""
+
+    client = videointelligence.VideoIntelligenceServiceClient()
+
+    # Configure the request
+    config = videointelligence.types.PersonDetectionConfig(
+        include_bounding_boxes=True,
+        include_attributes=True,
+        include_pose_landmarks=True,
+    )
+    context = videointelligence.types.VideoContext(
+        person_detection_config=config
+    )
+
+    # Start the asynchronous request
+    operation = client.annotate_video(
+        input_uri=gcs_uri,
+        features=[videointelligence.enums.Feature.PERSON_DETECTION],
+        video_context=context,
+    )
+
+    print("\nProcessing video for person detection annotations.")
+    result = operation.result(timeout=300)
+
+    print("\nFinished processing.\n")
+
+    # Retrieve the first result, because a single video was processed.
+    annotation_result = result.annotation_results[0]
+
+    for annotation in annotation_result.person_detection_annotations:
+        print("Person detected:")
+        for track in annotation.tracks:
+            print(
+                "Segment: {}s to {}s".format(
+                    track.segment.start_time_offset.seconds
+                    + track.segment.start_time_offset.nanos / 1e9,
+                    track.segment.end_time_offset.seconds
+                    + track.segment.end_time_offset.nanos / 1e9,
+                )
+            )
+
+            # Each segment includes timestamped objects that include
+            # characteristics, e.g. clothes and posture, of the person detected.
+            # Grab the first timestamped object
+            timestamped_object = track.timestamped_objects[0]
+            box = timestamped_object.normalized_bounding_box
+            print("\tBounding box:")
+            print("\t\tleft  : {}".format(box.left))
+            print("\t\ttop   : {}".format(box.top))
+            print("\t\tright : {}".format(box.right))
+            print("\t\tbottom: {}".format(box.bottom))
+
+            # Attributes include unique pieces of clothing,
+            # poses, or hair color.
+            print("\tAttributes:")
+            for attribute in timestamped_object.attributes:
+                print(
+                    "\t\t{}:{} {}".format(
+                        attribute.name, attribute.value, attribute.confidence
+                    )
+                )
+
+            # Landmarks in person detection include body parts
+            print("\tLandmarks:")
+            for landmark in timestamped_object.landmarks:
+                print(
+                    "\t\t{}: {} (x={}, y={})".format(
+                        landmark.name,
+                        landmark.confidence,
+                        landmark.point.x,
+                        landmark.point.y,
+                    )
+                )
+
+
+# [END video_detect_person_gcs_beta]
diff --git a/video/cloud-client/analyze/video_detect_person_gcs_beta_test.py b/video/cloud-client/analyze/video_detect_person_gcs_beta_test.py
new file mode 100644
index 000000000000..521959253bac
--- /dev/null
+++ b/video/cloud-client/analyze/video_detect_person_gcs_beta_test.py
@@ -0,0 +1,32 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import video_detect_person_gcs_beta
+
+RESOURCES = os.path.join(os.path.dirname(__file__), "resources")
+
+
+def test_detect_person(capsys):
+    input_uri = "gs://cloud-samples-data/video/googlework_tiny.mp4"
+
+    video_detect_person_gcs_beta.detect_person(gcs_uri=input_uri)
+
+    out, _ = capsys.readouterr()
+
+    assert "Person detected:" in out
+    assert "Attributes:" in out
+    assert "x=" in out
+    assert "y=" in out

From 6d855f3a183ee7bf147a097c4b65b5dd51c982fc Mon Sep 17 00:00:00 2001
From: Cameron Zahedi
Date: Fri, 14 Feb 2020 15:38:47 -0700
Subject: [PATCH 2/7] updating requirements.txt

---
 video/cloud-client/analyze/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/video/cloud-client/analyze/requirements.txt b/video/cloud-client/analyze/requirements.txt
index ba966ee8deb7..cf61c0964c59 100644
--- a/video/cloud-client/analyze/requirements.txt
+++ b/video/cloud-client/analyze/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-videointelligence==1.12.1
+google-cloud-videointelligence==1.13.0
 google-cloud-storage==1.23.0

From 1eeef6c84cf3862e77fd279147236214aad44a70 Mon Sep 17 00:00:00 2001
From: Cameron Zahedi
Date: Tue, 18 Feb 2020 15:43:12 -0700
Subject: [PATCH 3/7] updating test names to faces

---
 video/cloud-client/analyze/video_detect_faces_beta_test.py     | 2 +-
 video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/video/cloud-client/analyze/video_detect_faces_beta_test.py b/video/cloud-client/analyze/video_detect_faces_beta_test.py
index 7fac2db58d57..98f7b30ffd3d 100644
--- a/video/cloud-client/analyze/video_detect_faces_beta_test.py
+++ b/video/cloud-client/analyze/video_detect_faces_beta_test.py
@@ -19,7 +19,7 @@
 RESOURCES = os.path.join(os.path.dirname(__file__), "resources")
 
 
-def test_detect_person(capsys):
+def test_detect_faces(capsys):
     local_file_path = os.path.join(RESOURCES, "googlework_short.mp4")
 
     video_detect_faces_beta.detect_face(local_file_path=local_file_path)
diff --git a/video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py b/video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py
index 118572383f00..5edb3d9e3db3 100644
--- a/video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py
+++ b/video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py
@@ -19,7 +19,7 @@
 RESOURCES = os.path.join(os.path.dirname(__file__), "resources")
 
 
-def test_detect_person(capsys):
+def test_detect_faces(capsys):
     input_uri = "gs://cloud-samples-data/video/googlework_short.mp4"
 
     video_detect_faces_gcs_beta.detect_face(gcs_uri=input_uri)

From 53a27480182bf0984e33c604d4b2a723289c14e1 Mon Sep 17 00:00:00 2001
From: Cameron Zahedi
Date: Wed, 19 Feb 2020 09:59:34 -0700
Subject: [PATCH 4/7] fixing region tag typo

---
 video/cloud-client/analyze/video_detect_faces_beta.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/video/cloud-client/analyze/video_detect_faces_beta.py b/video/cloud-client/analyze/video_detect_faces_beta.py
index 987de286ffcc..865181a0bc29 100644
--- a/video/cloud-client/analyze/video_detect_faces_beta.py
+++ b/video/cloud-client/analyze/video_detect_faces_beta.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# [START video_detect_faces_gcs_beta]
+# [START video_detect_faces_beta]
 import io
 from google.cloud import videointelligence_v1p3beta1 as videointelligence
 
@@ -82,4 +82,4 @@ def detect_face(local_file_path="path/to/your/video-file.mp4"):
                 )
 
 
-# [END video_detect_faces_gcs_beta]
+# [END video_detect_faces_beta]

From 7df0185d5a0a1ad502618cd17c19a75c81c64986 Mon Sep 17 00:00:00 2001
From: Cameron Zahedi
Date: Wed, 19 Feb 2020 11:25:05 -0700
Subject: [PATCH 5/7] responding to comments

---
 .../analyze/video_detect_faces_beta.py          | 16 ++++++++--------
 .../analyze/video_detect_faces_beta_test.py     |  2 +-
 .../analyze/video_detect_faces_gcs_beta.py      | 16 ++++++++--------
 .../analyze/video_detect_faces_gcs_beta_test.py |  2 +-
 .../analyze/video_detect_person_beta.py         | 13 +++++++------
 .../analyze/video_detect_person_gcs_beta.py     | 13 +++++++------
 6 files changed, 32 insertions(+), 30 deletions(-)

diff --git a/video/cloud-client/analyze/video_detect_faces_beta.py b/video/cloud-client/analyze/video_detect_faces_beta.py
index 865181a0bc29..d7967643ff2e 100644
--- a/video/cloud-client/analyze/video_detect_faces_beta.py
+++ b/video/cloud-client/analyze/video_detect_faces_beta.py
@@ -17,7 +17,7 @@
 from google.cloud import videointelligence_v1p3beta1 as videointelligence
 
 
-def detect_face(local_file_path="path/to/your/video-file.mp4"):
+def detect_faces(local_file_path="path/to/your/video-file.mp4"):
     """Detects faces in a video from a local file."""
 
     client = videointelligence.VideoIntelligenceServiceClient()
@@ -60,16 +60,16 @@ def detect_faces(local_file_path="path/to/your/video-file.mp4"):
                 )
             )
 
-            # Each segment includes timestamped objects that include
+            # Each segment includes timestamped faces that include
             # characteristics of the face detected.
-            # Grab the first timestamped object
+            # Grab the first timestamped face
             timestamped_object = track.timestamped_objects[0]
             box = timestamped_object.normalized_bounding_box
-            print("\tBounding box:")
-            print("\t\tleft  : {}".format(box.left))
-            print("\t\ttop   : {}".format(box.top))
-            print("\t\tright : {}".format(box.right))
-            print("\t\tbottom: {}".format(box.bottom))
+			print("\tBounding box:")
+			print("\t\tleft  : {}".format(box.left))
+			print("\t\ttop   : {}".format(box.top))
+			print("\t\tright : {}".format(box.right))
+			print("\t\tbottom: {}".format(box.bottom))
 
             # Attributes include glasses, headwear, facial hair, smiling,
             # direction of gaze, etc.
diff --git a/video/cloud-client/analyze/video_detect_faces_beta_test.py b/video/cloud-client/analyze/video_detect_faces_beta_test.py
index 98f7b30ffd3d..916c11bba248 100644
--- a/video/cloud-client/analyze/video_detect_faces_beta_test.py
+++ b/video/cloud-client/analyze/video_detect_faces_beta_test.py
@@ -22,7 +22,7 @@
 def test_detect_faces(capsys):
     local_file_path = os.path.join(RESOURCES, "googlework_short.mp4")
 
-    video_detect_faces_beta.detect_face(local_file_path=local_file_path)
+    video_detect_faces_beta.detect_faces(local_file_path=local_file_path)
 
     out, _ = capsys.readouterr()
 
diff --git a/video/cloud-client/analyze/video_detect_faces_gcs_beta.py b/video/cloud-client/analyze/video_detect_faces_gcs_beta.py
index c5beb6af0de3..4f88d522940d 100644
--- a/video/cloud-client/analyze/video_detect_faces_gcs_beta.py
+++ b/video/cloud-client/analyze/video_detect_faces_gcs_beta.py
@@ -16,7 +16,7 @@
 from google.cloud import videointelligence_v1p3beta1 as videointelligence
 
 
-def detect_face(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
+def detect_faces(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
     """Detects faces in a video."""
 
     client = videointelligence.VideoIntelligenceServiceClient()
@@ -56,16 +56,16 @@ def detect_faces(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
                 )
             )
 
-            # Each segment includes timestamped objects that include
+            # Each segment includes timestamped faces that include
             # characteristics of the face detected.
-            # Grab the first timestamped object
+            # Grab the first timestamped face
             timestamped_object = track.timestamped_objects[0]
             box = timestamped_object.normalized_bounding_box
-            print("\tBounding box:")
-            print("\t\tleft  : {}".format(box.left))
-            print("\t\ttop   : {}".format(box.top))
-            print("\t\tright : {}".format(box.right))
-            print("\t\tbottom: {}".format(box.bottom))
+			print("\tBounding box:")
+			print("\t\tleft  : {}".format(box.left))
+			print("\t\ttop   : {}".format(box.top))
+			print("\t\tright : {}".format(box.right))
+			print("\t\tbottom: {}".format(box.bottom))
 
             # Attributes include glasses, headwear, facial hair, smiling,
             # direction of gaze, etc.
diff --git a/video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py b/video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py
index 5edb3d9e3db3..611a6f38c1df 100644
--- a/video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py
+++ b/video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py
@@ -22,7 +22,7 @@
 def test_detect_faces(capsys):
     input_uri = "gs://cloud-samples-data/video/googlework_short.mp4"
 
-    video_detect_faces_gcs_beta.detect_face(gcs_uri=input_uri)
+    video_detect_faces_gcs_beta.detect_faces(gcs_uri=input_uri)
 
     out, _ = capsys.readouterr()
 
diff --git a/video/cloud-client/analyze/video_detect_person_beta.py b/video/cloud-client/analyze/video_detect_person_beta.py
index 5d469a13902d..5ca9f0a73383 100644
--- a/video/cloud-client/analyze/video_detect_person_beta.py
+++ b/video/cloud-client/analyze/video_detect_person_beta.py
@@ -67,11 +67,11 @@ def detect_person(local_file_path="path/to/your/video-file.mp4"):
             # Grab the first timestamped object
             timestamped_object = track.timestamped_objects[0]
             box = timestamped_object.normalized_bounding_box
-            print("\tBounding box:")
-            print("\t\tleft  : {}".format(box.left))
-            print("\t\ttop   : {}".format(box.top))
-            print("\t\tright : {}".format(box.right))
-            print("\t\tbottom: {}".format(box.bottom))
+			print("\tBounding box:")
+			print("\t\tleft  : {}".format(box.left))
+			print("\t\ttop   : {}".format(box.top))
+			print("\t\tright : {}".format(box.right))
+			print("\t\tbottom: {}".format(box.bottom))
 
             # Attributes include unique pieces of clothing,
             # poses, or hair color.
@@ -83,7 +83,8 @@ def detect_person(local_file_path="path/to/your/video-file.mp4"):
                 )
             )
 
-            # Landmarks in person detection include body parts
+            # Landmarks in person detection include body parts such as
+            # left_shoulder, right_ear, and right_ankle
             print("\tLandmarks:")
             for landmark in timestamped_object.landmarks:
                 print(
diff --git a/video/cloud-client/analyze/video_detect_person_gcs_beta.py b/video/cloud-client/analyze/video_detect_person_gcs_beta.py
index ea2e181c6ed7..5015bfd30ddc 100644
--- a/video/cloud-client/analyze/video_detect_person_gcs_beta.py
+++ b/video/cloud-client/analyze/video_detect_person_gcs_beta.py
@@ -63,11 +63,11 @@ def detect_person(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
             # Grab the first timestamped object
             timestamped_object = track.timestamped_objects[0]
             box = timestamped_object.normalized_bounding_box
-            print("\tBounding box:")
-            print("\t\tleft  : {}".format(box.left))
-            print("\t\ttop   : {}".format(box.top))
-            print("\t\tright : {}".format(box.right))
-            print("\t\tbottom: {}".format(box.bottom))
+			print("\tBounding box:")
+			print("\t\tleft  : {}".format(box.left))
+			print("\t\ttop   : {}".format(box.top))
+			print("\t\tright : {}".format(box.right))
+			print("\t\tbottom: {}".format(box.bottom))
 
             # Attributes include unique pieces of clothing,
             # poses, or hair color.
@@ -79,7 +79,8 @@ def detect_person(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
                 )
             )
 
-            # Landmarks in person detection include body parts
+            # Landmarks in person detection include body parts such as
+            # left_shoulder, right_ear, and right_ankle
             print("\tLandmarks:")
             for landmark in timestamped_object.landmarks:
                 print(

From a715e3c6404cbdbd434bbc4b0ceae418bc5e1f13 Mon Sep 17 00:00:00 2001
From: Cameron Zahedi
Date: Wed, 19 Feb 2020 14:45:13 -0700
Subject: [PATCH 6/7] reverted tabs to fix linting errors

---
 video/cloud-client/analyze/video_detect_faces_beta.py  | 10 +++++-----
 .../analyze/video_detect_faces_gcs_beta.py             | 10 +++++-----
 video/cloud-client/analyze/video_detect_person_beta.py | 10 +++++-----
 .../analyze/video_detect_person_gcs_beta.py            | 10 +++++-----
 4 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/video/cloud-client/analyze/video_detect_faces_beta.py b/video/cloud-client/analyze/video_detect_faces_beta.py
index d7967643ff2e..c7bab3086b63 100644
--- a/video/cloud-client/analyze/video_detect_faces_beta.py
+++ b/video/cloud-client/analyze/video_detect_faces_beta.py
@@ -65,11 +65,11 @@ def detect_faces(local_file_path="path/to/your/video-file.mp4"):
             # Grab the first timestamped face
             timestamped_object = track.timestamped_objects[0]
             box = timestamped_object.normalized_bounding_box
-			print("\tBounding box:")
-			print("\t\tleft  : {}".format(box.left))
-			print("\t\ttop   : {}".format(box.top))
-			print("\t\tright : {}".format(box.right))
-			print("\t\tbottom: {}".format(box.bottom))
+            print("\tBounding box:")
+            print("\t\tleft  : {}".format(box.left))
+            print("\t\ttop   : {}".format(box.top))
+            print("\t\tright : {}".format(box.right))
+            print("\t\tbottom: {}".format(box.bottom))
 
             # Attributes include glasses, headwear, facial hair, smiling,
             # direction of gaze, etc.
diff --git a/video/cloud-client/analyze/video_detect_faces_gcs_beta.py b/video/cloud-client/analyze/video_detect_faces_gcs_beta.py
index 4f88d522940d..80c23842e197 100644
--- a/video/cloud-client/analyze/video_detect_faces_gcs_beta.py
+++ b/video/cloud-client/analyze/video_detect_faces_gcs_beta.py
@@ -61,11 +61,11 @@ def detect_faces(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
             # Grab the first timestamped face
             timestamped_object = track.timestamped_objects[0]
             box = timestamped_object.normalized_bounding_box
-			print("\tBounding box:")
-			print("\t\tleft  : {}".format(box.left))
-			print("\t\ttop   : {}".format(box.top))
-			print("\t\tright : {}".format(box.right))
-			print("\t\tbottom: {}".format(box.bottom))
+            print("\tBounding box:")
+            print("\t\tleft  : {}".format(box.left))
+            print("\t\ttop   : {}".format(box.top))
+            print("\t\tright : {}".format(box.right))
+            print("\t\tbottom: {}".format(box.bottom))
 
             # Attributes include glasses, headwear, facial hair, smiling,
             # direction of gaze, etc.
diff --git a/video/cloud-client/analyze/video_detect_person_beta.py b/video/cloud-client/analyze/video_detect_person_beta.py
index 5ca9f0a73383..770594ef3baf 100644
--- a/video/cloud-client/analyze/video_detect_person_beta.py
+++ b/video/cloud-client/analyze/video_detect_person_beta.py
@@ -67,11 +67,11 @@ def detect_person(local_file_path="path/to/your/video-file.mp4"):
             # Grab the first timestamped object
             timestamped_object = track.timestamped_objects[0]
             box = timestamped_object.normalized_bounding_box
-			print("\tBounding box:")
-			print("\t\tleft  : {}".format(box.left))
-			print("\t\ttop   : {}".format(box.top))
-			print("\t\tright : {}".format(box.right))
-			print("\t\tbottom: {}".format(box.bottom))
+            print("\tBounding box:")
+            print("\t\tleft  : {}".format(box.left))
+            print("\t\ttop   : {}".format(box.top))
+            print("\t\tright : {}".format(box.right))
+            print("\t\tbottom: {}".format(box.bottom))
 
             # Attributes include unique pieces of clothing,
             # poses, or hair color.
diff --git a/video/cloud-client/analyze/video_detect_person_gcs_beta.py b/video/cloud-client/analyze/video_detect_person_gcs_beta.py
index 5015bfd30ddc..99e3faf6deba 100644
--- a/video/cloud-client/analyze/video_detect_person_gcs_beta.py
+++ b/video/cloud-client/analyze/video_detect_person_gcs_beta.py
@@ -63,11 +63,11 @@ def detect_person(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
             # Grab the first timestamped object
             timestamped_object = track.timestamped_objects[0]
             box = timestamped_object.normalized_bounding_box
-			print("\tBounding box:")
-			print("\t\tleft  : {}".format(box.left))
-			print("\t\ttop   : {}".format(box.top))
-			print("\t\tright : {}".format(box.right))
-			print("\t\tbottom: {}".format(box.bottom))
+            print("\tBounding box:")
+            print("\t\tleft  : {}".format(box.left))
+            print("\t\ttop   : {}".format(box.top))
+            print("\t\tright : {}".format(box.right))
+            print("\t\tbottom: {}".format(box.bottom))
 
             # Attributes include unique pieces of clothing,
             # poses, or hair color.

From 2fc5d2b96a9fca7d41520ecde270f68b66b00b09 Mon Sep 17 00:00:00 2001
From: Cameron Zahedi
Date: Thu, 20 Feb 2020 10:14:51 -0700
Subject: [PATCH 7/7] responding to comments

---
 .../analyze/video_detect_faces_beta.py      | 14 ++++++-------
 .../analyze/video_detect_faces_gcs_beta.py  | 14 ++++++-------
 .../analyze/video_detect_person_beta.py     | 22 +++++++++----------
 .../analyze/video_detect_person_gcs_beta.py | 22 +++++++++----------
 4 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/video/cloud-client/analyze/video_detect_faces_beta.py b/video/cloud-client/analyze/video_detect_faces_beta.py
index c7bab3086b63..064c6197d572 100644
--- a/video/cloud-client/analyze/video_detect_faces_beta.py
+++ b/video/cloud-client/analyze/video_detect_faces_beta.py
@@ -65,18 +65,18 @@ def detect_faces(local_file_path="path/to/your/video-file.mp4"):
             # Grab the first timestamped face
             timestamped_object = track.timestamped_objects[0]
             box = timestamped_object.normalized_bounding_box
-            print("\tBounding box:")
-            print("\t\tleft  : {}".format(box.left))
-            print("\t\ttop   : {}".format(box.top))
-            print("\t\tright : {}".format(box.right))
-            print("\t\tbottom: {}".format(box.bottom))
+            print("Bounding box:")
+            print("\tleft  : {}".format(box.left))
+            print("\ttop   : {}".format(box.top))
+            print("\tright : {}".format(box.right))
+            print("\tbottom: {}".format(box.bottom))
 
             # Attributes include glasses, headwear, facial hair, smiling,
             # direction of gaze, etc.
-            print("\tAttributes:")
+            print("Attributes:")
             for attribute in timestamped_object.attributes:
                 print(
-                    "\t\t{}:{} {}".format(
+                    "\t{}:{} {}".format(
                         attribute.name, attribute.value, attribute.confidence
                     )
                 )
diff --git a/video/cloud-client/analyze/video_detect_faces_gcs_beta.py b/video/cloud-client/analyze/video_detect_faces_gcs_beta.py
index 80c23842e197..e8fae6eea4bf 100644
--- a/video/cloud-client/analyze/video_detect_faces_gcs_beta.py
+++ b/video/cloud-client/analyze/video_detect_faces_gcs_beta.py
@@ -61,18 +61,18 @@ def detect_faces(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
             # Grab the first timestamped face
             timestamped_object = track.timestamped_objects[0]
             box = timestamped_object.normalized_bounding_box
-            print("\tBounding box:")
-            print("\t\tleft  : {}".format(box.left))
-            print("\t\ttop   : {}".format(box.top))
-            print("\t\tright : {}".format(box.right))
-            print("\t\tbottom: {}".format(box.bottom))
+            print("Bounding box:")
+            print("\tleft  : {}".format(box.left))
+            print("\ttop   : {}".format(box.top))
+            print("\tright : {}".format(box.right))
+            print("\tbottom: {}".format(box.bottom))
 
             # Attributes include glasses, headwear, facial hair, smiling,
             # direction of gaze, etc.
-            print("\tAttributes:")
+            print("Attributes:")
             for attribute in timestamped_object.attributes:
                 print(
-                    "\t\t{}:{} {}".format(
+                    "\t{}:{} {}".format(
                         attribute.name, attribute.value, attribute.confidence
                     )
                 )
diff --git a/video/cloud-client/analyze/video_detect_person_beta.py b/video/cloud-client/analyze/video_detect_person_beta.py
index 770594ef3baf..2fc7e5942dd8 100644
--- a/video/cloud-client/analyze/video_detect_person_beta.py
+++ b/video/cloud-client/analyze/video_detect_person_beta.py
@@ -67,32 +67,32 @@ def detect_person(local_file_path="path/to/your/video-file.mp4"):
             # Grab the first timestamped object
             timestamped_object = track.timestamped_objects[0]
             box = timestamped_object.normalized_bounding_box
-            print("\tBounding box:")
-            print("\t\tleft  : {}".format(box.left))
-            print("\t\ttop   : {}".format(box.top))
-            print("\t\tright : {}".format(box.right))
-            print("\t\tbottom: {}".format(box.bottom))
+            print("Bounding box:")
+            print("\tleft  : {}".format(box.left))
+            print("\ttop   : {}".format(box.top))
+            print("\tright : {}".format(box.right))
+            print("\tbottom: {}".format(box.bottom))
 
             # Attributes include unique pieces of clothing,
             # poses, or hair color.
-            print("\tAttributes:")
+            print("Attributes:")
             for attribute in timestamped_object.attributes:
                 print(
-                    "\t\t{}:{} {}".format(
+                    "\t{}:{} {}".format(
                         attribute.name, attribute.value, attribute.confidence
                     )
                 )
 
             # Landmarks in person detection include body parts such as
             # left_shoulder, right_ear, and right_ankle
-            print("\tLandmarks:")
+            print("Landmarks:")
             for landmark in timestamped_object.landmarks:
                 print(
-                    "\t\t{}: {} (x={}, y={})".format(
+                    "\t{}: {} (x={}, y={})".format(
                         landmark.name,
                         landmark.confidence,
-                        landmark.point.x,
-                        landmark.point.y,
+                        landmark.point.x,  # Normalized vertex
+                        landmark.point.y,  # Normalized vertex
                     )
                 )
 
diff --git a/video/cloud-client/analyze/video_detect_person_gcs_beta.py b/video/cloud-client/analyze/video_detect_person_gcs_beta.py
index 99e3faf6deba..b588891a2b4a 100644
--- a/video/cloud-client/analyze/video_detect_person_gcs_beta.py
+++ b/video/cloud-client/analyze/video_detect_person_gcs_beta.py
@@ -63,32 +63,32 @@ def detect_person(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
             # Grab the first timestamped object
             timestamped_object = track.timestamped_objects[0]
             box = timestamped_object.normalized_bounding_box
-            print("\tBounding box:")
-            print("\t\tleft  : {}".format(box.left))
-            print("\t\ttop   : {}".format(box.top))
-            print("\t\tright : {}".format(box.right))
-            print("\t\tbottom: {}".format(box.bottom))
+            print("Bounding box:")
+            print("\tleft  : {}".format(box.left))
+            print("\ttop   : {}".format(box.top))
+            print("\tright : {}".format(box.right))
+            print("\tbottom: {}".format(box.bottom))
 
             # Attributes include unique pieces of clothing,
             # poses, or hair color.
-            print("\tAttributes:")
+            print("Attributes:")
             for attribute in timestamped_object.attributes:
                 print(
-                    "\t\t{}:{} {}".format(
+                    "\t{}:{} {}".format(
                         attribute.name, attribute.value, attribute.confidence
                     )
                 )
 
             # Landmarks in person detection include body parts such as
             # left_shoulder, right_ear, and right_ankle
-            print("\tLandmarks:")
+            print("Landmarks:")
             for landmark in timestamped_object.landmarks:
                 print(
-                    "\t\t{}: {} (x={}, y={})".format(
+                    "\t{}: {} (x={}, y={})".format(
                         landmark.name,
                         landmark.confidence,
-                        landmark.point.x,
-                        landmark.point.y,
+                        landmark.point.x,  # Normalized vertex
+                        landmark.point.y,  # Normalized vertex
                     )
                 )
 
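Note for reviewers (not part of the patch series): with the seven patches applied, the new samples can be smoke-tested outside pytest with a short driver like the minimal sketch below. It assumes the pinned beta client from requirements.txt (google-cloud-videointelligence==1.13.0) is installed, default application credentials are configured, and it reuses the public cloud-samples-data fixtures the tests already point at; the function names are the final ones from PATCH 5/7 (detect_faces / detect_person).

# Minimal smoke-test driver for the new beta samples; run from
# video/cloud-client/analyze/ so the sample modules are importable.
import video_detect_faces_gcs_beta
import video_detect_person_gcs_beta

if __name__ == "__main__":
    # Face detection against the fixture used by video_detect_faces_gcs_beta_test.py.
    video_detect_faces_gcs_beta.detect_faces(
        gcs_uri="gs://cloud-samples-data/video/googlework_short.mp4"
    )

    # Person detection (bounding boxes, attributes, pose landmarks) against
    # the fixture used by video_detect_person_gcs_beta_test.py.
    video_detect_person_gcs_beta.detect_person(
        gcs_uri="gs://cloud-samples-data/video/googlework_tiny.mp4"
    )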