# Requires: pip install py-feat opencv-python
import cv2
import numpy as np
from feat import Detector

# Initialize the Py-Feat detector with pretrained models
detector = Detector(
    face_model="retinaface",
    landmark_model="mobilenet",
    au_model="xgb",
    emotion_model="resmasknet",
)
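
# Emotion labels in the order py-feat's emotion models output scores
# (assumed ordering, matching py-feat's FEAT_EMOTION_COLUMNS)
EMOTIONS = ["anger", "disgust", "fear", "happiness",
            "sadness", "surprise", "neutral"]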

# Open webcam
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        break

    try:
        # Detect faces. For a single frame, py-feat's frame-level API
        # (~v0.5/0.6) returns one list per frame, each entry a
        # [x1, y1, x2, y2, confidence] box (not a dict with a "bbox" key)
        detected_faces = detector.detect_faces(frame)

        if detected_faces and len(detected_faces[0]) > 0:
            # Landmarks come from a separate call that takes the boxes
            detected_landmarks = detector.detect_landmarks(frame, detected_faces)

            # Score every face in the frame at once; assumed to return one
            # row of seven emotion probabilities per face
            emotion_scores = np.asarray(
                detector.detect_emotions(frame, detected_faces, detected_landmarks)
            ).reshape(-1, len(EMOTIONS))

            for bbox, scores in zip(detected_faces[0], emotion_scores):
                # Boxes are corner coordinates, not (x, y, w, h)
                x1, y1, x2, y2 = map(int, bbox[:4])
                dominant_emotion = EMOTIONS[int(np.argmax(scores))]

                # Draw face bounding box
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

                # Display the dominant emotion above the box
                cv2.putText(frame, f"Emotion: {dominant_emotion}", (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2, cv2.LINE_AA)

    except Exception as e:
        print(f"Error: {e}")

    cv2.imshow("Py-Feat Emotion Classification", frame)

    if cv2.waitKey(1) & 0xFF == ord("q"):  # Press 'q' to quit
        break
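
# Standard OpenCV cleanup: release the webcam and close the display window
cap.release()
cv2.destroyAllWindows()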