realtimedetection.py
import cv2
from keras.models import model_from_json
import numpy as np

# Load the model architecture from JSON and restore its trained weights.
json_file = open("facialemotionmodel.json", "r")
model_json = json_file.read()
json_file.close()
model = model_from_json(model_json)
model.load_weights("facialemotionmodel.h5")

# Haar cascade for frontal face detection, bundled with OpenCV.
haar_file = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(haar_file)

def extract_features(image):
    # Reshape a 48x48 grayscale crop to (1, 48, 48, 1) and scale pixels to [0, 1].
    feature = np.array(image)
    feature = feature.reshape(1, 48, 48, 1)
    return feature / 255.0

# Capture from the default webcam at 320x240.
webcam = cv2.VideoCapture(0)
webcam.set(3, 320)   # frame width
webcam.set(4, 240)   # frame height

labels = {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy', 4: 'neutral', 5: 'sad', 6: 'surprise'}

while True:
    ret, im = webcam.read()
    if not ret:
        break
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    try:
        for (p, q, r, s) in faces:
            # Crop the detected face, resize it to the model's 48x48 input, and classify.
            image = gray[q:q + s, p:p + r]
            cv2.rectangle(im, (p, q), (p + r, q + s), (255, 0, 0), 2)
            image = cv2.resize(image, (48, 48))
            img = extract_features(image)
            pred = model.predict(img)
            prediction_label = labels[pred.argmax()]
            cv2.putText(im, prediction_label, (p - 10, q - 10),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (0, 255, 0))
        # Place the 320x240 frame onto the background image before displaying it.
        imgBackground2 = cv2.imread('Resources/background3.png')
        imgBackground2[100:100 + 240, 60:60 + 320] = im
        cv2.imshow("Real-Time Emotion Detection", imgBackground2)
        if cv2.waitKey(27) & 0xFF == 27:   # exit on Esc
            break
    except cv2.error:
        pass

webcam.release()
cv2.destroyAllWindows()
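
# --- Optional sanity check: a minimal sketch, not part of the original script. ---
# It reuses the same cascade, preprocessing, and model as the live loop above to
# classify faces in a still image. The file name 'test.jpg' is hypothetical; point
# it at any local image containing a face.
def predict_from_file(path='test.jpg'):
    frame = cv2.imread(path)
    gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detections = face_cascade.detectMultiScale(gray_img, 1.3, 5)
    results = []
    for (p, q, r, s) in detections:
        face = cv2.resize(gray_img[q:q + s, p:p + r], (48, 48))
        pred = model.predict(extract_features(face))
        results.append(labels[pred.argmax()])
    return results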