-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathface_rec_webcam.py
728 lines (592 loc) · 26.5 KB
/
face_rec_webcam.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
###############################################################################
# #
# Object Detection Source Code For ISee #
# #
#-----------------------------------------------------------------------------#
# #
# Description: #
# This is a demo of running face recognition on live video from your webcam. #
# It's a little more complicated than the other example, but it includes some #
# basic performance tweaks to make things run a lot faster: #
# 1. Process each video frame at 1/4 resolution (though still display it at #
# full resolution) #
# 2. Only detect faces in every other frame of video. #
# #
# NOTE: This example requires OpenCV (the `cv2` library) to be installed only #
# to read from your webcam. OpenCV is NOT required to use the face_recognition#
# library. It's only required if you want to run this specific demo. If you #
# have trouble installing it, try any of the other demos that don't require it#
# instead. #
###############################################################################
#------------------------------------------------------------------------------
# Libraries Import
#------------------------------------------------------------------------------
import numpy as np
import cv2
import face_recognition
import pyttsx3
import os
import re
import glob
import speech_recognition as sr
from os import path
import math
from PIL import Image
import sys, select
import shutil
import socket
from build_index import build_index
from connect_fs import upload_file
from connect_fs import download_file
from rotateFace import rotate_image
#------------------------------------------------------------------------------
# Constants / Global Declaration
#------------------------------------------------------------------------------
# Flag for showing video stream
CV_SHOW_IMAGE_FLAG = False # Keep false until cv2 crash is resolved
# Flag for outputting audio notification
# *TODO*:
# The program currently crashes when both video and audio output are enabled!
# So PYTTSX3_OUTPUT_AUDIO is set to !(CV_SHOW_IMAGE_FLAG) for now. The value
# can be changed to a specific value when the bug is fixed.
PYTTSX3_OUTPUT_AUDIO = not CV_SHOW_IMAGE_FLAG
# Connection to eye tracker
SINGLE_DETACTION = True  # when True, recognize only the face nearest the gaze fixation
fixation = (0, 0)  # latest gaze coordinate received from the eye tracker
HOST = '192.168.8.3'  # eye tracker address
PORT = 50001  # eye tracker port
EYE_SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Default path for adding new annotation file
ANNOTATION_PATH = "./known_annotation"
NUMANNOTATION = 1  # maximum number of annotations read aloud per person
# Variables for adding unknown person
UNKNOWN_PERSON_IDX = 1  # next numeric id for a new "Unknown_<n>" entry
STARING_THRESHOLD = 1 # Numbers of times seen unknown person to add into contact
KNOWN_FACE_ENCODINGS = []  # face encodings, index-aligned with KNOWN_FACE_NAMES
KNOWN_FACE_NAMES = []  # person names, index-aligned with KNOWN_FACE_ENCODINGS
# Seen Counter for Unknown Faces
# format: {'name':'# seen'}
unknown_ppl_counters = {}
# Determine console or pi
WITHMONITOR = True
#------------------------------------------------------------------------------
# Environment Setup
#------------------------------------------------------------------------------
# Camera source - Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture("http://192.168.8.2:5000/stream/video.mjpeg")
#video_capture = cv2.VideoCapture(0)
# Audio source - Initiate speech engine
speech_engn = pyttsx3.init()
notify_perct_thresh = 0.3  # NOTE(review): appears unused in this file — confirm before removing
# speech recognition by obtaining audio from the microphone
#speech_recognizer = sr.Recognizer()
#------------------------------------------------------------------------------
# Function to load Face and annotation records
#------------------------------------------------------------------------------
def LoadFaceAndEncoding(known_ppl_pics):
    """Learn to recognize each person pictured in *known_ppl_pics*.

    Args:
        known_ppl_pics: iterable of image file paths; each file's base name
            (without extension) is used as the person's name.

    Side effects:
        Appends to the module-level KNOWN_FACE_NAMES / KNOWN_FACE_ENCODINGS
        lists, keeping the two lists index-aligned.
    """
    # Load a picture of each person and learn how to recognize it.
    for known_person_pic in known_ppl_pics:
        # get this person's name from the file name
        image_name = os.path.basename(known_person_pic)
        person_name = os.path.splitext(image_name)[0]
        # get this person's face encoding; compute it ONCE — the original
        # called face_encodings() twice per image, doubling the expensive work
        image = face_recognition.load_image_file(known_person_pic)
        encodings = face_recognition.face_encodings(image)
        if encodings:
            #TODO: save this person's name and face encoding in DB!
            # save this person's name and face encoding
            KNOWN_FACE_NAMES.append(person_name)
            KNOWN_FACE_ENCODINGS.append(encodings[0])
            print("I can recognize " + person_name + " now.")
        else:
            print("Unable to detect face in {}".format(known_person_pic))
def SetupSpeechEngine():
    """Speed up the speech engine's speaking rate by 22.5%."""
    current_rate = speech_engn.getProperty('rate')
    boosted_rate = current_rate * 1.225
    speech_engn.setProperty('rate', boosted_rate)
#------------------------------------------------------------------------------
# Function to record an unknown person
#------------------------------------------------------------------------------
def RecordUnknownPerson(face_encoding, unknown_id):
    """Register a never-before-seen face under a generated placeholder name.

    Args:
        face_encoding: the new face's encoding vector.
        unknown_id: numeric id used to build the "Unknown_<id>" name.

    Returns:
        The generated placeholder name.
    """
    global KNOWN_FACE_ENCODINGS
    global KNOWN_FACE_NAMES
    global UNKNOWN_PERSON_IDX
    # Build the placeholder name for this person
    person_name = "Unknown_" + str(unknown_id)
    print("in RecordUnknownPerson, person name is: " + person_name)
    # Start the seen-counter for this person; a brand-new unknown person
    # must not already have an entry
    if person_name not in unknown_ppl_counters:
        unknown_ppl_counters[person_name] = 1
    else:
        print("WARNING: unknown person should not be available in unknown_ppl_counters")
    # TODO: save encoding in DB!
    # Remember the face so it is recognized next time
    KNOWN_FACE_NAMES.append(person_name)
    KNOWN_FACE_ENCODINGS.append(face_encoding)
    print("Unknown person added, I can recognize " + person_name + " now.")
    # Advance the id for the next stranger
    UNKNOWN_PERSON_IDX += 1
    return person_name
def AddUnknownAsContact(unknown_id_name):
    """Ask the user whether to promote an unknown person to a named contact.

    Args:
        unknown_id_name: the placeholder "Unknown_<id>" name to replace.

    On "yes", renames the placeholder entry in KNOWN_FACE_NAMES and removes
    its unknown_ppl_counters entry.  On "no", drives the seen-counter
    negative so the prompt will not reappear for a while.
    """
    global KNOWN_FACE_NAMES
    # Multiplication of STARING_THRESHOLD to make seen count negative
    # if answer "no" to add contact
    NEG_MULTI_OF_SEEN_THRES = 2
    contact_request = "Would you like to add this person as a contact?"
    print(contact_request)
    answer = GetTextFromAudio(contact_request)
    # Do nothing if user didn't answer the question. Another alternative is to remove the
    # count, but since the voice recognition is flaky right now, let's assume that it is
    # the speech engine that's unable to catch the response.
    if answer is None:
        return
    if IsPositiveResponse(answer):
        name_request = "What is his or her name?"
        name_of_unknown_person = GetTextFromAudio(name_request)
        # BUG FIX: bail out when speech recognition failed to capture a name;
        # concatenating None below would raise a TypeError.
        if name_of_unknown_person is None:
            return
        # update person name
        # TODO: update in DB!
        for idx, known_face_name in enumerate(KNOWN_FACE_NAMES):
            if unknown_id_name in known_face_name:
                print("Update " + unknown_id_name + " to " + name_of_unknown_person)
                KNOWN_FACE_NAMES[idx] = name_of_unknown_person
        # confirm updated contact (BUG FIX: leading space so the spoken
        # sentence is not run together with the name)
        contact_added = GetTextFromAudio(name_of_unknown_person + " is added to your contact list.")
        print("Here is the updated contact list:")
        print(KNOWN_FACE_NAMES)
        # Remove this user from unknown_ppl_counters as name is now added
        del unknown_ppl_counters[unknown_id_name]
    else:
        # Make seen count negative as user answers "no", user will have to
        # see this person more often for the question to prompt
        unknown_ppl_counters[unknown_id_name] = - NEG_MULTI_OF_SEEN_THRES * STARING_THRESHOLD
        print("No one is added to your contact.")
def slientRecord(face_image, face_encoding, unknown_id):
    """Silently cache a snapshot of an unknown person's face to disk.

    Args:
        face_image: RGB array crop of the face.
        face_encoding: the face's encoding (currently unused; kept for
            interface compatibility with callers).
        unknown_id: identifier used as the cached image's file name.
    """
    # Hard code for now
    cache_directory = "./cache"
    # BUG FIX: make sure the cache directory exists before writing into it;
    # Image.save would otherwise fail with FileNotFoundError.
    os.makedirs(cache_directory, exist_ok=True)
    # save image with unknown_id
    pil_image = Image.fromarray(face_image)
    pil_image.save("{}/{}.jpg".format(cache_directory, unknown_id))
    print("capturing unknown contact into database")
#------------------------------------------------------------------------------
# Face Recongition Function
#------------------------------------------------------------------------------
def FaceRecognitionWebcam():
    """Main recognition loop: read webcam frames, recognize faces, and
    announce who is in view.

    Runs until interrupted (or 'q' when the preview window is enabled).
    Mutates the module-level known-face lists and unknown-person counters
    as new faces are seen.
    """
    # TODO: check if database is empty
    # if yes, load Face and annotation records from database and save them
    # if no, then do not need to record these people again
    # load pictures of known people from known_ppl_path
    known_ppl_pics = glob.glob("./face_database/*.jpg")
    LoadFaceAndEncoding(known_ppl_pics)
    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    global STARING_THRESHOLD
    global KNOWN_FACE_NAMES
    global KNOWN_FACE_ENCODINGS
    best_match_index = -1
    last_seen_person = "unknown"
    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()
        # Resize frame of video to 1/2 size for faster face recognition processing
        #small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
        # NOTE(review): resizing is disabled, so frames are processed at full
        # resolution despite the header comment about 1/4-scale processing
        small_frame = frame
        # Convert the image from BGR color (which OpenCV uses) to RGB color
        # (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]
        # Mainline Recognition
        #------------------------------------------------------------------------------
        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            # Refresh the gaze fixation point from the eye tracker
            EyePosition()
            if SINGLE_DETACTION:
                if len(face_locations) > 1:
                    print("debug1")
                    # Several faces in view: encode only the one nearest the gaze fixation
                    index = getFaceIndex(face_locations)
                    face_encodings = face_recognition.face_encodings(rgb_small_frame, [face_locations[index]])
                else:
                    print("debug2")
                    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
                    print(face_encodings)
            face_names = []
            for face_encoding in face_encodings:
                print("debug3")
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(KNOWN_FACE_ENCODINGS, face_encoding)
                name = "Unknown"
                if True in matches:
                    print("debug4")
                    # If a match was found in KNOWN_FACE_ENCODINGS, just use the first one.
                    # first_match_index = matches.index(True)
                    # name = KNOWN_FACE_NAMES[first_match_index]
                    # Or instead, use the known face with the smallest distance to the new face
                    face_distances = face_recognition.face_distance(KNOWN_FACE_ENCODINGS, face_encoding)
                    best_match_index = np.argmin(face_distances)
                    # distance < 0.5 rejects weak matches; otherwise keep "Unknown"
                    if matches[best_match_index] and face_distances[best_match_index] < 0.5:
                        name = KNOWN_FACE_NAMES[best_match_index]
                else:
                    #TODO: we should associate staring counter with each unknown person in DB
                    # No match at all: remember this stranger for next time
                    unknown_name = RecordUnknownPerson(face_encoding, UNKNOWN_PERSON_IDX)
                    print("Recognized new unknown person: {} with counter: {}".format(unknown_name, unknown_ppl_counters[unknown_name]))
                # It is possible that matched person is actually unknown in database.
                # Handle this case with special care:
                # Count the number of times this unknown person is seen
                if "unknown" in name.lower() and name in unknown_ppl_counters:
                    # Increment seen count
                    unknown_ppl_counters[name] += 1
                    print("Staring count is: " + str(unknown_ppl_counters[name]))
                    # if you continue to stare at this unknown person
                    # i.e. might be having a conversation with
                    # prompt to add this person as a contact
                    if unknown_ppl_counters[name] > STARING_THRESHOLD:
                        #AddUnknownAsContact(name)
                        # NOTE(review): this always crops face_locations[0], not the
                        # face actually matched above — confirm intended
                        top, right, bottom, left = face_locations[0][0], face_locations[0][1], face_locations[0][2], face_locations[0][3]
                        face_img = rgb_small_frame[top:bottom, left:right]
                        print("face_image: {}, dimension: {} {} {} {}".format(face_img, top, right, bottom, left))
                        slientRecord(face_img, face_encoding, name)
                # Prompt information to user
                NotifyNameAndInfo(name, best_match_index)
                # Feature to add new person or annotation
                # if "unknown" not in name.lower() and name != last_seen_person:
                #     OptionForNewAnnotation(best_match_index, name)
                # TODO: option to pull out all of this person's pictures
                # Update last seen person
                last_seen_person = name
                face_names.append(name)
                # Only the first detected encoding is handled per frame
                break
        # Display the results
        if CV_SHOW_IMAGE_FLAG:
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                # Scale back up face locations since the frame we detected in was scaled to 1/4 size
                # NOTE(review): the frame is NOT scaled down above, so this 4x
                # scale-up looks stale — confirm before enabling the display
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            # Display the resulting image
            cv2.imshow('Video', frame)
            # Hit 'q' on the keyboard to quit!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        # Update frame control knob
        process_this_frame = not process_this_frame
#return the index of the face that is closest to fixation in face_locations
def getFaceIndex(face_locations):
    """Return the index of the face whose (top, right) coordinates lie
    closest (squared Euclidean distance) to the current gaze fixation."""
    best_index = 0
    best_distance = None
    for i, coordinate in enumerate(face_locations):
        print(coordinate[0], coordinate[1])
        dx = int(coordinate[0]) - fixation[0]
        dy = int(coordinate[1]) - fixation[1]
        distance = dx * dx + dy * dy
        # strict < keeps the FIRST minimum, matching list.index(min(...))
        if best_distance is None or distance < best_distance:
            best_distance = distance
            best_index = i
    return best_index
#------------------------------------------------------------------------------
# Voice Notification Functions
#------------------------------------------------------------------------------
def VoiceNotification(str_txt):
    """Speak *str_txt* aloud via the module-level pyttsx3 engine."""
    speech_engn.say(str_txt)
    speech_engn.runAndWait()
def NotifyNameAndInfo(name, idx):
    """Announce a recognized person's name plus a capped number of notes.

    Args:
        name: recognized person's name ("Unknown..." for strangers).
        idx: index used to look up this person's annotation file.

    Output goes to the speech engine when PYTTSX3_OUTPUT_AUDIO is set,
    otherwise to stdout.
    """
    notification = ""
    annotations = []
    # Format notification message; only known people get annotations read
    if "Unknown" in name:
        notification = "There is an unknown person in front you."
    else:
        notification = "This is " + name
        annotations = ReadAnnotationFromId(idx)
    # Notify name
    if PYTTSX3_OUTPUT_AUDIO:
        VoiceNotification(notification)
    else:
        print("NOTFIY: {}".format(notification))
    # Notify annotations
    counter = 0
    for a in annotations:
        if PYTTSX3_OUTPUT_AUDIO:
            VoiceNotification(a)
        else:
            print("ANNOTATION: {}".format(a))
        # BUG FIX: counter was never incremented, so the NUMANNOTATION
        # cap below could never take effect and ALL annotations were read
        counter += 1
        # only read out certain number of annotations
        if counter > NUMANNOTATION:
            break
    print("\n")
#------------------------------------------------------------------------------
# Speech Recognition Functions
#------------------------------------------------------------------------------
def GetTextFromAudio(indicator):
    """Speak *indicator* as a prompt, then listen and transcribe the reply.

    Args:
        indicator: prompt sentence spoken to the user before listening.

    Returns:
        The recognized text, or None when the audio could not be understood
        or the recognition service could not be reached.

    Raises:
        Any unexpected recognition error is logged and re-raised.
    """
    # BUG FIX: the module-level `speech_recognizer` is commented out at the
    # top of the file, so this function raised NameError; create the
    # recognizer locally instead.
    speech_recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        VoiceNotification(indicator)
        audio = speech_recognizer.listen(source, phrase_time_limit=6)
    # recognize speech using Google Speech Recognition
    try:
        # for testing purposes, we're just using the default API key
        # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
        # instead of `r.recognize_google(audio)`
        text = speech_recognizer.recognize_google(audio)
        print("Google Speech Recognition thinks you said " + text)
        return text
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
    except Exception as e:
        print("Other speech recongition error: {0}".format(e))
        raise
#------------------------------------------------------------------------------
# Annotation Related Functions
#------------------------------------------------------------------------------
def ReadAnnotationFromId(idx):
    """Return the list of annotation lines for face-record *idx*.

    Args:
        idx: face-record index used to locate the annotation file.

    Returns:
        List of annotation lines, or [] when no annotation file exists.
    """
    # Get matching annotation file from database
    af, found = GetAnnotationByFRId(idx)
    if not found:
        return []
    # `with` guarantees the file is closed even if readlines() raises
    # (the original manual open/close leaked the handle on error)
    with open(af, "r") as f:
        annotations = f.readlines()
    return annotations
def AnnotateById(idx, annotations):
    """Append annotation lines for face-record *idx*, creating the file
    if it does not exist yet.

    Args:
        idx: face-record index; the file is ANNOTATION_PATH/<idx>.txt.
        annotations: iterable of annotation strings (None entries skipped).
    """
    # Get matching annotation file from database
    af, found = GetAnnotationByFRId(idx)
    if not found:
        # BUG FIX: the original opened the reported path ('No such file') in
        # "a+" mode BEFORE checking `found`, creating a stray file literally
        # named 'No such file' and leaking that handle.
        af = ANNOTATION_PATH + "/" + str(idx) + ".txt"
    try:
        # "a+" appends and creates the file when missing, so a single
        # open covers both the found and not-found cases.
        with open(af, "a+") as f:
            # Append new annotation line by line
            for a in annotations:
                if a is not None:
                    f.write(a + '\n')
    except IOError:
        print("Warning: Unable to open & write annotation record for ID:{}".format(idx))
def AnnotateByName(name, annotations):
    """Append an annotation line for contact *name*, creating the file
    if it does not exist yet.

    Args:
        name: contact name; the file is ANNOTATION_PATH/<name>.txt.
        annotations: a single annotation string (skipped when None).
    """
    # Get matching annotation file from database
    af, found = GetAnnotationByName(name)
    if not found:
        # BUG FIX: same stray-file/handle-leak issue as AnnotateById — the
        # original opened 'No such file' in "a+" before checking `found`.
        af = "{}/{}.txt".format(ANNOTATION_PATH, name)
    try:
        with open(af, "a+") as f:
            # Append new annotation
            if annotations is not None:
                f.write(annotations + '\n')
    except IOError:
        # BUG FIX: the original referenced the undefined `idx` here, which
        # raised NameError instead of printing the warning.
        print("Warning: Unable to open & write annotation record for ID:{}".format(name))
def OptionForNewAnnotation(idx, name):
    """Prompt User to add new annotation if needed"""
    annotation_request = "Would you like to add note for " + name + "?"
    print(annotation_request)
    reply = GetTextFromAudio(annotation_request)
    collected_notes = []
    # Keep collecting notes for as long as the user keeps agreeing
    while IsPositiveResponse(reply):
        # Prompt user to input a note and save it
        collected_notes.append(GetTextFromAudio("Please state your note"))
        # Ask whether to continue
        continue_record_request = "Would you like to add more note?"
        print(continue_record_request)
        reply = GetTextFromAudio(continue_record_request)
    # Record annotation if there is any
    if collected_notes:
        AnnotateById(idx, collected_notes)
#------------------------------------------------------------------------------
# Database functions
#------------------------------------------------------------------------------
def GetAnnotationByFRId(idx):
    """Return (annotation file path, True) for face-record *idx*, or
    ('No such file', False) when no annotation file exists."""
    # hardcode for now
    personal_annotation = ANNOTATION_PATH + "/" + str(idx) + ".txt"
    if not os.path.exists(personal_annotation):
        return ('No such file', False)
    return (personal_annotation, True)
def GetAnnotationByName(name):
    """Return (annotation file path, True) for contact *name*, or
    ('No such file', False) when no annotation file exists."""
    # hardcode for now
    personal_annotation = ANNOTATION_PATH + "/" + str(name) + ".txt"
    if not os.path.exists(personal_annotation):
        return ('No such file', False)
    return (personal_annotation, True)
# def updateAnnotationByFR(name,path):
# def idToName(id):
# def nameToId(name):
#------------------------------------------------------------------------------
# General Utility functions
#------------------------------------------------------------------------------
def IsPositiveResponse(response):
    """Return True when *response* contains an affirmative word.

    Args:
        response: recognized speech text, or None when nothing was heard.

    Returns:
        True for affirmative answers ("yes", "sure", "ok", ...), else False.
    """
    if response is None:
        return False
    # Lowercase once instead of once per keyword; any() short-circuits
    # exactly like the original chained `or` expression.
    lowered = response.lower()
    affirmatives = ("yes", "yup", "yah", "sure", "ok", "go ahead")
    return any(word in lowered for word in affirmatives)
#------------------------------------------------------------------------------
# Add New Contact or Annotation Logic
#------------------------------------------------------------------------------
def TimedInputPrompt(t, q):
    """Print question *q* and wait up to *t* seconds for a line on stdin.

    Returns:
        The stripped input line, or "" when the prompt times out.
    """
    print("{} seconds time out".format(t))
    print(q)
    readable, _, _ = select.select([sys.stdin], [], [], t)
    if not readable:
        return ""
    return sys.stdin.readline().strip()
def ConsoleSaveUnknownFaces():
    """Interactive end-of-session review of captured unknown faces.

    First offers to name (and so keep) each cached snapshot, deleting the
    rest; then repeatedly offers to attach annotations to existing
    contacts.  All prompts use a 5-second input timeout.
    """
    print("\n\nPlease manage unknown person you have seen today")
    # Loop over unknown faces captured into the ./cache directory
    cap_img = glob.glob1('./cache',"*.jpg")
    num_cap_img = len(cap_img)
    if num_cap_img != 0:
        answer = TimedInputPrompt(5, "Would you like to add {} captured unknown people into you contact? [y/n]\n".format(num_cap_img))
        if "y" in answer.lower():
            for im in cap_img:
                # Show the snapshot so the user can decide whether to keep it
                im_p = Image.open('./cache/{}'.format(im))
                im_p.show()
                print("Please enter the name for this person if you want to save to contact, else enter nothing")
                name = input()
                if name == '':
                    # remove cache photo
                    os.remove('./cache/{}'.format(im))
                    print("{} removed from cache".format(im))
                else:
                    # Keep the photo: move it into the face database under the given name
                    shutil.move('./cache/{}'.format(im), './face_database/{}.jpg'.format(name))
        # cleanup remaining cache files
        cap_img = glob.glob1('./cache',"*.jpg")
        for f in cap_img:
            os.remove('./cache/{}'.format(f))
    # Wait to ask to add annotation
    flag = True
    ctr = 0
    while flag:
        answer = TimedInputPrompt(5, "\nWould you like to add annotation for people in the contact? [y/n]")
        if "y" in answer.lower():
            kwn_img = glob.glob1('./face_database',"*.jpg")
            print("Please enter name of the contact: ")
            name = input()
            # Case-insensitive substring match against database file names
            match = [f for f in kwn_img if name.lower() in f.lower()]
            if len(match) == 0:
                print("{} is not found in the contact, please enter another name".format(name))
            elif len(match) == 1:
                print("Please enter annotation for the contact:\n")
                annotation = input()
                base = os.path.basename(match[0])
                fname = os.path.splitext(base)[0]
                AnnotateByName(fname, annotation)
                # Successful annotation resets the give-up counter
                ctr = 0
            else:
                print("More than one contact is found with this name, please enter the full name:\n{}".format(match))
        else:
            flag = False
        # Give up after several consecutive unsuccessful rounds
        ctr += 1
        if ctr > 3:
            break
    print("\n\nProgram existing now, thanks for using :)")
#------------------------------------------------------------------------------
# Cleanup Functions
#------------------------------------------------------------------------------
def GeneralCleanup():
    """Release the speech engine, the webcam handle and any OpenCV windows."""
    # Turn off audio source
    speech_engn.stop()
    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
def MoveCacheToDB(cache_path='./cache', db_path='./face_database'):
    """Move every cached face snapshot into the face-database directory.

    Args:
        cache_path: source directory holding cached snapshots
            (default './cache', matching the previous hard-coded value).
        db_path: destination face-database directory
            (default './face_database').
    """
    # Generalized: the directories were hard-coded locals before; the
    # defaults keep existing no-argument callers working unchanged.
    for f in os.listdir(cache_path):
        shutil.move('{}/{}'.format(cache_path, f), db_path)
def RemoveUnknownInDB(db_path='./face_database'):
    """Delete every 'Unknown_*' placeholder image from the face database.

    Args:
        db_path: face-database directory (default './face_database',
            matching the previous hard-coded value).
    """
    # Generalized: the directory was a hard-coded local before; the default
    # keeps existing no-argument callers working unchanged.
    for f in os.listdir(db_path):
        # Placeholder entries are matched case-insensitively on 'unknown_'
        if 'unknown_' in f.lower():
            os.remove('{}/{}'.format(db_path, f))
#------------------------------------------------------------------------------
# Connection to Eye Tracker
#------------------------------------------------------------------------------
def EyePosition():
    """Read one gaze-coordinate packet from the eye tracker.

    Updates the module-level `fixation` tuple and returns it; on any
    read/parse failure the previous fixation is left untouched and None
    is returned.
    """
    # BUG FIX: without `global`, the assignment below only created a local
    # variable and the shared fixation point was never updated.
    global fixation
    try:
        data = EYE_SOCKET.recv(1024)
        print('Received', repr(data))
        # Packet is a utf-8 string of whitespace-separated integers
        text = data.decode("utf-8")
        print(text)
        coord = [int(n) for n in text.split()]
        print(coord)
        fixation = (coord[0], coord[1])
        return fixation
    except Exception:
        # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt,
        # which the main program relies on for its save/cleanup path.
        # Best-effort read: keep the previous fixation on any failure.
        pass
def SetupEyeTracker():
    """Connect to the eye tracker and read the initial fixation point.

    Side effects: connects the module-level EYE_SOCKET to (HOST, PORT)
    and updates the module-level `fixation` tuple.
    """
    global EYE_SOCKET
    # BUG FIX: `fixation` was assigned as a local before, so the shared
    # fixation point never received the initial value read here.
    global fixation
    EYE_SOCKET.connect((HOST, PORT))
    data = EYE_SOCKET.recv(1024)
    print('Received', repr(data))
    # Packet is a utf-8 string of whitespace-separated integers
    text = data.decode("utf-8")
    print(text)
    coord = [int(n) for n in text.split()]
    print(coord)
    fixation = (coord[0], coord[1])
#------------------------------------------------------------------------------
# Run Program
#------------------------------------------------------------------------------
if __name__ == "__main__":
    # Entry point: set up the eye tracker and speech engine, then run the
    # recognition loop.  Ctrl+C is the normal shutdown path: review captured
    # strangers, merge the cache into the database and rebuild/upload the
    # search index.
    try:
        SetupEyeTracker()
        SetupSpeechEngine()
        # rotate image
        rotate_image()
        #download_file()
        #RemoveUnknownInDB()
        FaceRecognitionWebcam()
        GeneralCleanup()
    except KeyboardInterrupt:
        if WITHMONITOR:
            ConsoleSaveUnknownFaces()
        MoveCacheToDB()
        # ******rotate image
        rotate_image()
        build_index()
        upload_file()
    except Exception:
        # BUG FIX: `raise e` replaces traceback context; a bare `raise`
        # re-raises with the original traceback intact.
        raise