video_detect_faces_gcs.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START video_detect_faces_gcs]
from google.cloud import videointelligence_v1 as videointelligence


def detect_faces(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
    """Detects faces in a video."""

    client = videointelligence.VideoIntelligenceServiceClient()

    # Configure the request
    config = videointelligence.FaceDetectionConfig(
        include_bounding_boxes=True, include_attributes=True
    )
    context = videointelligence.VideoContext(face_detection_config=config)

    # Start the asynchronous request
    operation = client.annotate_video(
        request={
            "features": [videointelligence.Feature.FACE_DETECTION],
            "input_uri": gcs_uri,
            "video_context": context,
        }
    )

    print("\nProcessing video for face detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # Retrieve the first result, because a single video was processed.
    annotation_result = result.annotation_results[0]

    for annotation in annotation_result.face_detection_annotations:
        print("Face detected:")
        for track in annotation.tracks:
            print(
                "Segment: {}s to {}s".format(
                    track.segment.start_time_offset.seconds
                    + track.segment.start_time_offset.microseconds / 1e6,
                    track.segment.end_time_offset.seconds
                    + track.segment.end_time_offset.microseconds / 1e6,
                )
            )

            # Each segment includes timestamped faces that include
            # characteristics of the face detected.
            # Grab the first timestamped face.
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft  : {}".format(box.left))
            print("\ttop   : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include glasses, headwear, smiling, direction of gaze
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}:{} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )
# [END video_detect_faces_gcs]
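
For quick local testing, a minimal driver along the following lines could be appended to the file. This is a sketch, not part of the sample itself: the gs:// URI is the same placeholder used above and must be replaced with a real path, and it assumes Application Default Credentials are already configured (for example via gcloud auth application-default login).

if __name__ == "__main__":
    # Hypothetical usage sketch: swap in a real gs:// video path.
    # Assumes Application Default Credentials and the Video Intelligence
    # API are already set up for the project.
    detect_faces(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4")

Because annotate_video is a long-running operation, operation.result(timeout=300) blocks until the service finishes or the 300-second timeout elapses; for longer videos you may need to raise that timeout.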