-
Notifications
You must be signed in to change notification settings - Fork 405
/
Copy pathdata_out.py
46 lines (35 loc) · 1.57 KB
/
data_out.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
from __future__ import annotations
import numpy as np
import rerun as rr
# ----------------------------------------------------------------------------------------------
# Load and prepare the data

# Blendshape value above which the mouth is treated as open.
JAW_OPEN_THRESHOLD = 0.15

# Load the saved face-tracking recording from disk.
recording = rr.dataframe.load_recording("face_tracking.rrd")

# Query the jawOpen blendshape signal, indexed by frame number, into a pandas dataframe.
jaw_open_view = recording.view(index="frame_nr", contents="/blendshapes/0/jawOpen")
df = jaw_open_view.select().read_pandas()

# Each cell of the queried column is a list; flatten it into a plain float column.
df["jawOpen"] = df["/blendshapes/0/jawOpen:Scalar"].explode().astype(float)

# ----------------------------------------------------------------------------------------------
# Analyze the data

# Derive a boolean open/closed state per frame by thresholding the signal.
df["jawOpenState"] = df["jawOpen"] > JAW_OPEN_THRESHOLD

# ----------------------------------------------------------------------------------------------
# Log the data back to the viewer

# Reuse the source recording's application and recording ids so the new data
# is merged into the same recording inside the viewer, then connect.
rr.init(recording.application_id(), recording_id=recording.recording_id())
rr.connect_grpc()

# Send the open/closed state as a scalar time series, one value per frame.
rr.send_columns(
    "/jaw_open_state",
    indexes=[rr.TimeSequenceColumn("frame_nr", df["frame_nr"])],
    columns=rr.Scalar.columns(scalar=df["jawOpenState"]),
)

# Attach an OPEN/CLOSE label to the face bounding-box entity.
# `show_labels` is logged once, statically, so the per-frame labels below are rendered.
target_entity = "/video/detector/faces/0/bbox"
rr.log(target_entity, rr.Boxes2D.from_fields(show_labels=True), static=True)
rr.send_columns(
    target_entity,
    indexes=[rr.TimeSequenceColumn("frame_nr", df["frame_nr"])],
    columns=rr.Boxes2D.columns(labels=np.where(df["jawOpenState"], "OPEN", "CLOSE")),
)