from ultralytics import YOLO
from ultralytics.solutions import object_counter
from collections import defaultdict
import time
import streamlit as st
import cv2
import numpy as np
import csv
import pandas as pd
import settings
import datetime
import matplotlib.pyplot as plt
import sqlite3
import hashlib
import os
from dotenv import load_dotenv
from twilio.rest import Client

def send_sms():
    # Twilio credentials are read from the environment (e.g. a .env file loaded
    # via python-dotenv) rather than hard-coded in source control. The variable
    # names TWILIO_ACCOUNT_SID / TWILIO_AUTH_TOKEN are conventional choices.
    load_dotenv()
    account_sid = os.getenv('TWILIO_ACCOUNT_SID')
    auth_token = os.getenv('TWILIO_AUTH_TOKEN')
    # Twilio phone number (the one you got from Twilio)
    from_phone = '+18564324705'
    # Recipient's phone number
    to_phone = '+9203195093731'  # Include country code
    # Your message
    message = 'Hello from Twilio!'
    # Initialize Twilio client
    client = Client(account_sid, auth_token)
    try:
        # Send SMS
        message = client.messages.create(
            body=message,
            from_=from_phone,
            to=to_phone
        )
        print("SMS sent successfully! SID:", message.sid)
    except Exception as e:
        print("Error sending SMS:", e)

def load_model(model_path):
    """
    Loads a YOLO object detection model from the specified model_path.

    Parameters:
        model_path (str): The path to the YOLO model file.

    Returns:
        A YOLO object detection model.
    """
    model = YOLO(model_path)
    return model

# def display_tracker_options():
#     display_tracker = st.radio("Display Tracker", ('Yes', 'No'))
#     is_display_tracker = True if display_tracker == 'Yes' else False
#     if is_display_tracker:
#         tracker_type = st.radio("Tracker", ("bytetrack.yaml", "botsort.yaml"))
#         return is_display_tracker, tracker_type
#     return is_display_tracker, None

def _display_detected_frames(conf, model, st_frame, image):
    # is_display_tracking=None, tracker=None
    """
    Display the detected objects on a video frame using the YOLOv8 model.

    Args:
    - conf (float): Confidence threshold for object detection.
    - model (YoloV8): A YOLOv8 object detection model.
    - st_frame (Streamlit object): A Streamlit object to display the detected video.
    - image (numpy array): A numpy array representing the video frame.

    Returns:
        None
    """
    # Resize the image to a standard size
    image = cv2.resize(image, (720, int(720 * (9 / 16))))

    # Track the objects in the image using the YOLOv8 model
    # if is_display_tracking:
    res = model.track(image, conf=conf, persist=True)
    # else:
    #     # Predict the objects in the image using the YOLOv8 model
    #     res = model.predict(image, conf=conf)

    # Plot the detected objects on the video frame
    res_plotted = res[0].plot()
    st_frame.image(res_plotted,
                   caption='Detected Video',
                   channels="BGR",
                   use_column_width=True
                   )


# is_display_tracker, tracker = display_tracker_options()

def play_rtsp_stream(conf, model):
    """
    Plays an RTSP stream. Detects objects in real-time using the YOLOv8 object detection model.

    Parameters:
        conf: Confidence of YOLOv8 model.
        model: An instance of the `YOLOv8` class containing the YOLOv8 model.

    Returns:
        None

    Raises:
        None
    """
    source_rtsp = st.sidebar.text_input("rtsp stream url:")
    st.sidebar.caption('Example URL: rtsp://admin:12345@192.168.1.210:554/Streaming/Channels/101')
    # is_display_tracker, tracker = display_tracker_options()
    if st.sidebar.button('Detect Objects'):
        try:
            vid_cap = cv2.VideoCapture(source_rtsp)
            st_frame = st.empty()
            while vid_cap.isOpened():
                success, image = vid_cap.read()
                if success:
                    _display_detected_frames(conf,
                                             model,
                                             st_frame,
                                             image,
                                             )
                    # is_display_tracker, tracker
                else:
                    vid_cap.release()
                    # vid_cap = cv2.VideoCapture(source_rtsp)
                    # time.sleep(0.1)
                    # continue
                    break
        except Exception as e:
            vid_cap.release()
            st.sidebar.error("Error loading RTSP stream: " + str(e))

def plot_donut_chart(data1, data2, title):
    labels = ['Outflow Count', 'Inflow Count']
    sizes = [data1['Out Count'], data2['In Count']]
    fig, ax = plt.subplots()
    ax.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90)
    ax.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    plt.title(title)
    st.pyplot(fig)

def plot_histogram(data1, data2, title):
    # Data
    labels = ['Out Count', 'In Count']
    sizes = [data1['Out Count'], data2['In Count']]
    # Define colors for each bar
    colors = ['blue', 'orange']
    # Create bar chart
    fig, ax = plt.subplots()
    ax.bar(labels, sizes, color=colors)
    ax.set_xlabel('Categories')
    ax.set_ylabel('Counts')
    ax.set_title(title)
    plt.xticks(rotation=45)  # Rotate x-axis labels for better readability
    st.pyplot(fig)

def plot_vehicle_histogram(data, title):
    # Data
    labels = data.keys()
    sizes = data.values()
    # Define colors for each bar
    colors = ['blue', 'green', 'red', 'orange', 'purple', 'yellow']
    # Create bar chart
    fig, ax = plt.subplots()
    ax.bar(labels, sizes, color=colors)
    ax.set_xlabel('Vehicle Types')
    ax.set_ylabel('Counts')
    ax.set_title(title)
    plt.xticks(rotation=45)  # Rotate x-axis labels for better readability
    st.pyplot(fig)

def play_stored_video(conf, model):
    """
    Plays a stored video file. Tracks and detects objects in real-time using the YOLOv8 object detection model.

    Parameters:
        conf: Confidence of YOLOv8 model.
        model: An instance of the `YOLOv8` class containing the YOLOv8 model.

    Returns:
        None

    Raises:
        None
    """
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")  # Generate timestamp
    csv_filename = f"object_counts_{timestamp}.csv"
    fieldnames = ['frame_number', 'in_count', 'out_count', 'vehicle_type']
    # Check if the CSV file already exists
    if not os.path.exists(csv_filename):
        # Create the CSV file with headers
        with open(csv_filename, 'w', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()  # Write the header row

    source_vid = st.sidebar.selectbox(
        "Choose a video...", settings.VIDEOS_DICT.keys())
    # is_display_tracker, tracker = display_tracker_options()
    with open(settings.VIDEOS_DICT.get(source_vid), 'rb') as video_file:
        video_bytes = video_file.read()
    if video_bytes:
        st.video(video_bytes)

    if st.sidebar.button('Generate Dashboard'):
        df = pd.read_csv('test3.csv')
        last_row = df.iloc[-1]  # Get the last row of the DataFrame
        out_count = last_row['out_count']
        in_count = last_row['in_count']
        vehicle_type = df['vehicle_type']
        out_count = int(out_count)
        in_count = int(in_count)
        out_info_counts = {'Out Count': out_count}
        in_info_counts = {'In Count': in_count}
        if in_info_counts.get('In Count', 0) > 2:
            send_sms()
        vehicle_counts = defaultdict(int)
        for vt in vehicle_type:
            vehicle_counts[vt] += 1
        st.title("Outflow and Inflow Info Donut Chart")
        plot_donut_chart(out_info_counts, in_info_counts, "Outflow and Inflow Info Distribution")
        st.title("Outflow and Inflow Histogram")
        plot_histogram(out_info_counts, in_info_counts, "Outflow and Inflow Histogram")
        plot_vehicle_histogram(vehicle_counts, "Vehicle Types Distribution")

    if st.sidebar.button('Detect Video Objects'):
        try:
            vid_cap = cv2.VideoCapture(
                str(settings.VIDEOS_DICT.get(source_vid)))
            track_history = defaultdict(lambda: [])
            assert vid_cap.isOpened(), "Error reading video file"
            counter = object_counter.ObjectCounter()  # Init Object Counter
            region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]
            counter.set_args(view_img=True,
                             reg_pts=region_points,
                             classes_names=model.names,
                             draw_tracks=True)
            st_frame = st.empty()
            while vid_cap.isOpened():
                success, im0 = vid_cap.read()
                if success:
                    with open(csv_filename, 'a', newline='') as csvfile:  # Open CSV within the loop
                        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                        in_count = counter.in_counts
                        out_count = counter.out_counts
                        frame_number = vid_cap.get(cv2.CAP_PROP_POS_FRAMES)
                        results = model.track(im0, persist=True)
                        # Log one row per detection in the current frame
                        for frame_index, frame_results in enumerate(results):
                            for detection in frame_results.boxes:
                                class_id = detection.cls
                                vehicle_type = model.names[int(class_id)]
                                writer.writerow({'frame_number': frame_number, 'in_count': in_count, 'out_count': out_count, 'vehicle_type': vehicle_type})
                    image = cv2.resize(im0, (720, int(720 * (9 / 16))))
                    im0 = counter.start_counting(im0, results)
                    if results is not None and results[0].boxes is not None and results[0].boxes.id is not None:
                        # Get the boxes and track IDs
                        boxes = results[0].boxes.xywh.cpu()
                        track_ids = results[0].boxes.id.int().cpu().tolist()
                        # Visualize the results on the frame
                        annotated_frame = results[0].plot()
                        # Plot the tracks
                        for box, track_id in zip(boxes, track_ids):
                            x, y, w, h = box
                            track = track_history[track_id]
                            track.append((float(x), float(y)))  # x, y center point
                            if len(track) > 30:  # retain track history for the last 30 frames
                                track.pop(0)
                            # Draw the tracking lines
                            points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
                            cv2.polylines(annotated_frame, [points], isClosed=False, color=(230, 230, 230), thickness=10)
                        st_frame.image(annotated_frame,
                                       caption='Detected Video',
                                       channels="BGR",
                                       use_column_width=True
                                       )
                        # is_display_tracker, tracker
                else:
                    vid_cap.release()
                    break
        except Exception as e:
            st.sidebar.error("Error loading video: " + str(e))

def create_connection():
    """
    Create a database connection to the SQLite database.

    Returns:
        conn: SQLite database connection object
    """
    conn = None
    try:
        conn = sqlite3.connect('database.db')  # Replace 'database.db' with your database file name
        return conn
    except sqlite3.Error as e:
        print(e)
    return conn

def create_user_table(conn):
    """
    Create a 'users' table in the database if it doesn't exist.

    Parameters:
        conn (sqlite3.Connection): SQLite database connection object
    """
    try:
        cursor = conn.cursor()
        cursor.execute('''CREATE TABLE IF NOT EXISTS users (
                            id INTEGER PRIMARY KEY,
                            username TEXT UNIQUE NOT NULL,
                            password TEXT NOT NULL
                        )''')
        conn.commit()
    except sqlite3.Error as e:
        print(e)

def authenticate_user(conn, username, password):
    """
    Authenticate a user based on the provided username and password.

    Parameters:
        conn (sqlite3.Connection): SQLite database connection object
        username (str): Username provided by the user
        password (str): Password provided by the user

    Returns:
        bool: True if the user is authenticated, False otherwise
    """
    # Hash the password provided by the user
    hashed_password = hashlib.sha256(password.encode()).hexdigest()
    # Execute a SQL query to fetch the user with the provided username and hashed password
    cursor = conn.execute("SELECT * FROM users WHERE username = ? AND password = ?", (username, hashed_password))
    # Fetch one row from the result set
    user = cursor.fetchone()
    # If the user exists in the database, return True (authenticated), otherwise return False
    return user is not None

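# authenticate_user compares against SHA-256 hashes already stored in the
# users table, but this module never shows how those rows are created. The
# helper below is a minimal sketch of that registration step under the same
# hashing scheme; the name `register_user` and its exact behaviour are
# illustrative assumptions, not part of the original code.
def register_user(conn, username, password):
    """Hypothetical helper: insert a new user with a SHA-256-hashed password."""
    hashed_password = hashlib.sha256(password.encode()).hexdigest()
    try:
        conn.execute("INSERT INTO users (username, password) VALUES (?, ?)",
                     (username, hashed_password))
        conn.commit()
        return True
    except sqlite3.IntegrityError:
        # Username already exists (UNIQUE constraint on username)
        return False
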
def login_page(conn):
    """
    Display the login page.

    Parameters:
        conn (sqlite3.Connection): SQLite database connection object
    """
    st.title("Login Page")
    # Username and password input fields
    username = st.text_input("Username")
    password = st.text_input("Password", type="password")
    # Login button
    if st.button("Login"):
        if not username or not password:
            st.error("Username and password are required")
        else:
            # Authenticate the user
            authenticated = authenticate_user(conn, username, password)
            if authenticated:
                st.session_state.is_logged_in = True
            else:
                st.error("Invalid username or password")

def play_passengercount_video(conf, model):
    """
    Plays a stored video file for passenger counting. Detects objects frame by
    frame using the YOLOv8 object detection model.

    Parameters:
        conf: Confidence of YOLOv8 model.
        model: An instance of the `YOLOv8` class containing the YOLOv8 model.

    Returns:
        None
    """
    source_vid = st.sidebar.selectbox(
        "Choose a video...", settings.VIDEOS_DICT.keys())
    video_path = settings.VIDEOS_DICT.get(source_vid)
    with open(video_path, 'rb') as video_file:
        video_bytes = video_file.read()
    if video_bytes:
        st.video(video_bytes)
    if st.sidebar.button('Detect Objects'):
        try:
            vid_cap = cv2.VideoCapture(str(video_path))  # Use full path here
            st_frame = st.empty()
            while vid_cap.isOpened():
                success, image = vid_cap.read()
                if success:
                    _display_detected_frames(conf,
                                             model,
                                             st_frame,
                                             image,
                                             )
                    # is_display_tracker, tracker
                else:
                    vid_cap.release()
                    break
        except Exception as e:
            vid_cap.release()
            st.sidebar.error("Error loading Video: " + str(e))
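

# A minimal sketch of how the pieces in this module might be wired together
# from the main Streamlit script, assuming the usual login-then-detect flow.
# Everything below is illustrative: the function name `run_app_sketch`, the
# MODEL_PATH placeholder, and the sidebar labels are assumptions rather than
# part of the original project.
def run_app_sketch():
    """Hypothetical entry-point wiring for the helpers defined above."""
    MODEL_PATH = "weights/yolov8n.pt"  # assumed model path; adjust to your project
    conn = create_connection()          # open (or create) database.db
    create_user_table(conn)             # ensure the users table exists
    if not st.session_state.get("is_logged_in", False):
        login_page(conn)                # sets st.session_state.is_logged_in on success
        return
    model = load_model(MODEL_PATH)
    conf = st.sidebar.slider("Model Confidence", 0.25, 1.0, 0.4)
    source = st.sidebar.radio("Source", ["Stored Video", "RTSP Stream", "Passenger Count"])
    if source == "Stored Video":
        play_stored_video(conf, model)
    elif source == "RTSP Stream":
        play_rtsp_stream(conf, model)
    else:
        play_passengercount_video(conf, model)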