-
Notifications
You must be signed in to change notification settings - Fork 12
/
Copy pathconfig_tflite_ssd_stream.py
231 lines (171 loc) · 10.4 KB
/
config_tflite_ssd_stream.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
from configs.config_base import ConfigBase
from configs.config_patterns import door_movement
from lib.constants import InputMode, DetectorType
from detection.door_state_detectors import SingleShotFrameDiffDoorStateDetector
from lib.detection_buffer import SlidingWindowDetectionBuffer, SimpleDetectionBuffer
from notifier import NotificationTypes
class Config(ConfigBase):
    """Argos configuration: TFLite SSD MobileNet detector on an RTMP camera stream.

    Every attribute set here is read by the rest of the pipeline (motion
    detector, object detector, pattern detector, notifier). Attribute names
    are the contract — do not rename them; only tune the values.
    Name prefixes group the settings: ``md_`` motion detector, ``tf_``/``od_``
    object detector, ``pattern_``/``door_`` pattern detection, ``mqtt_``/
    ``ha_webhook_``/``notifier_`` notifications.
    """

    def __init__(self):
        super().__init__()

        ## VIDEO FEED / FPS REPORTING
        # whether to show fps in the output video feed
        self.show_fps = True
        # the fps to limit the output video feed to; -1 means no explicit limit.
        # not usually necessary since the feed is already limited to the speed
        # of the motion detector
        self.video_feed_fps = -1
        # print fps on the console every x frames
        self.fps_print_frames = 10

        ## MOTION DETECTOR CONFIG
        # size (in pixels) of the smallest contour of motion that is kept
        # (shown by yellow boxes if md_show_all_contours is True)
        self.md_min_cont_area = 50
        # the image thresholding value for the motion detector.
        # read about image thresholding in opencv here:
        # https://docs.opencv.org/master/d7/d4d/tutorial_py_thresholding.html
        self.md_tval = 25
        # background accumulation weight. the higher the weight, the lower the
        # "memory" of the motion detector for new objects: a higher value shows
        # motion for shorter periods, a lower value keeps reporting motion
        # longer for any new object that entered the frame. tune per the fps of
        # your stream and the speed of the objects you detect
        # (e.g. a snail vs a person).
        self.md_bg_accum_weight = 0.5
        # if True will show granular motion detection contours
        self.md_show_all_contours = False
        # number of frames to "warm up" the motion detector; during these
        # frames only the background model is updated and no motion detection
        # is done. -1 disables the warm-up phase.
        self.md_warmup_frame_count = -1
        # whether to update the background model at all.
        # motion detection won't happen if this is False.
        self.md_update_bg_model = True
        # the background model can be reset dynamically through /config
        # by setting this flag
        self.md_reset_bg_model = False
        # limits the motion detector frame rate
        # (saves CPU cycles for the object detector)
        self.md_frame_rate = 5
        # minimum size (height/width in pixels) of the box for detected motion;
        # useful for filtering small motion like tiny shadows or moving curtains
        self.md_box_threshold_y = 200
        self.md_box_threshold_x = 200
        # do motion detection only within this mask (x1, y1, x2, y2)
        self.md_mask = (250, 0, 690, 520)
        # don't do motion detection inside this (negative) mask; None disables it
        self.md_nmask = None
        # blur the output video wherever there is motion — useful to share
        # videos of argos in action, or if you are privacy conscious at home
        self.md_blur_output_frame = False
        # same, but blurs the output frame of the object detector
        self.od_blur_output_frame = False

        ## OBJECT DETECTOR CONFIG
        # path to the tensorflow model: the "saved_model" directory for a TF2
        # model, or the .tflite file for a tf lite model
        self.tf_model_path = 'tf_models/tflite/coco_ssd_mobilenet_v1_1.0_quant/detect.tflite'
        # path to the label map of the tensorflow model
        self.tf_path_to_labelmap = 'tf_models/tflite/coco_ssd_mobilenet_v1_1.0_quant/labelmap.txt'
        # tensorflow score threshold (i.e. minimum detection confidence)
        self.tf_accuracy_threshold = 0.4
        # list of labels to report detections for. use your own labels if you
        # have a custom trained model; this uses coco labels.
        self.tf_detection_labels = ['person', 'dog']
        # list of masks to filter detections by
        # (objects detected outside these masks are ignored)
        self.tf_detection_masks = None
        # list of negative masks to filter detections by
        # (objects detected inside these masks are ignored)
        self.tf_detection_nmasks = None
        # minimum (x, y) size of a detected object — avoids detecting a person
        # inside a newspaper on the table or in a photo frame
        self.tf_box_thresholds = (150, 150)
        # configure the detection buffer:
        # SimpleDetectionBuffer reports the most accurate detection from a
        # single frame; SlidingWindowDetectionBuffer accumulates detections and
        # only reports one when buffer_threshold detections were found within
        # buffer_duration millis.
        # self.tf_detection_buffer = SlidingWindowDetectionBuffer(3000, 4)
        self.tf_detection_buffer = SimpleDetectionBuffer()
        # switch between TF1, TF2 or TFLITE
        self.tf_detector_type = DetectorType.TFLITE
        # when True the object detector only runs on frames with motion;
        # disable motion gating entirely if you have an expensive GPU or a
        # coral TPU and can afford to run the object detector on every frame
        self.tf_apply_md = True
        # write a jpeg file with the detection drawn on it
        self.tf_od_frame_write = True
        # write a pascal VOC format xml annotation file for each detection —
        # useful later for transfer learning to train a model on these
        # detections and create your own labels
        self.tf_od_annotation_write = True
        # path where the above jpegs/annotations are stored
        self.tf_output_detection_path = '/home/pi/detections'
        # limit the speed of the object detector (useful in testing);
        # -1 means unlimited
        self.od_frame_rate = -1
        # size of the task queue between the motion detector and object
        # detector threads; increasing this will cause more lagged detection
        self.od_task_q_size = 1000

        ## PATTERN DETECTOR CONFIG
        # enable the movement pattern detector
        self.pattern_detection_enabled = True
        # define your own patterns here to go beyond door movement (the first
        # use case this project was built for).
        # `pattern_detection_pattern_steps` needs to be a list of tuples
        # (pattern: [list of states]); the list of states can include NotStates
        # for negation. see the doc of
        # `PatternDetector.find_mov_ptn_in_state_history()` for how this works,
        # with examples. argos ships with a pattern out of the box in
        # config_patterns/door_movement.py
        self.pattern_detection_pattern_steps = door_movement.pattern_steps
        # duration (in seconds) for which the pattern detection state history
        # should be maintained
        self.pattern_detection_state_history_length = 20
        # duration (in seconds) for which the pattern detection state history
        # should be kept if there are partial pattern matches
        self.pattern_detection_state_history_length_partial = 300
        # run the pattern detector every this-many seconds
        self.pattern_detection_interval = 1
        # see the docs of the respective door state detectors to understand how
        # they work and how their parameters make them behave; choose one based
        # on your environment.
        # self.door_state_detector = SingleShotDoorStateDetector((215, 114, 227, 123), (118, 80, 26), (151, 117, 72))
        # self.door_state_detector = AdaptiveDoorStateDetector((215, 114, 227, 123),
        #                                                      (DoorStates.DOOR_CLOSED, DoorStates.DOOR_OPEN))
        self.door_state_detector = SingleShotFrameDiffDoorStateDetector((215, 114, 227, 123), (196, 131, 215, 147))
        # show the door state in the output video frame
        self.door_state_detector_show_detection = True
        # if True you can step through the output video (on the flask server)
        # frame by frame by pressing any key on the console where you're
        # running stream.py. press 'q' to quit.
        self.debug_mode = False

        ## NOTIFIER CONFIG
        # whether to enable MQTT notifications to HA (Home Assistant)
        self.send_mqtt = False
        # whether to enable webhook based notifications to HA
        self.send_webhook = True
        # keep re-sending the last motion state every x seconds (in case HA
        # restarted or just didn't get our message last time)
        self.mqtt_heartbeat_secs = 30
        # topic where object detections are sent
        self.mqtt_object_detect_topic = 'home-assistant/pi-object-detection/main_door/object'
        # topic where pattern detections are sent
        self.mqtt_movement_pattern_detect_topic = 'home-assistant/pi-object-detection/main_door/pattern'
        # topic where other state changes are sent
        self.mqtt_state_detect_topic = 'home-assistant/pi-object-detection/main_door/state'
        # webhook url where object detections are sent
        # ({} placeholders are filled in by the notifier)
        self.ha_webhook_object_detect_url = "https://<your-hass>.duckdns.org:8123/api/webhook/pi_object_detection_main_door?object={}&img={}"
        # webhook url where pattern detections are sent
        self.ha_webhook_pattern_detect_url = "https://<your-hass>.duckdns.org:8123/api/webhook/pi_pattern_detection_main_door?pattern={}&img={}"
        # webhook url where other state detections are sent
        self.ha_webhook_state_detect_url = "https://<your-hass>.duckdns.org:8123/api/webhook/pi_state_detection_main_door?state={}"
        # detection images are scp'ed to your HA installation with these
        # credentials so that they can be referenced in the android/iOS
        # notification (served from HA's webserver)
        self.ha_webhook_ssh_host = '<your-hass-host>'
        self.ha_webhook_ssh_username = 'pi'
        self.ha_webhook_target_dir = '/usr/share/hassio/homeassistant/www/detections/'
        # usual mqtt connection settings for HA
        self.mqtt_host = '<your-mqtt-host>'
        self.mqtt_port = 1883
        self.mqtt_username = "mqtt"
        self.mqtt_password = "------"
        # size of the notifier queue. increase it if you have a slower network,
        # although the default is more than enough.
        self.notifier_queue_size = 1000
        # per-second rate limits for the different kinds of notifications;
        # values can be fractional as well (0.1 = at most one every 10s)
        self.notifier_rate_limits = {
            NotificationTypes.OBJECT_DETECTED: 1,
            NotificationTypes.PATTERN_DETECTED: 0,
            NotificationTypes.DOOR_STATE_CHANGED: 0,
            NotificationTypes.MOTION_STATE_CHANGED: 0.1
        }

        ## INPUT CONFIG
        # supports RTMP, picamera and local video file
        self.input_mode = InputMode.RTMP_STREAM
        self.rtmp_stream_url = "rtmp://192.168.1.19:43331/live/main_door"