from PyQt5.QtCore import QThread, pyqtSignal
from numpy import ndarray
import cv2

import core.face_detection as ftm


class CameraThread(QThread):
    """Captures webcam frames on a worker thread and emits them as Qt signals."""

    new_frame = pyqtSignal(ndarray)       # full annotated frame for the live preview
    storage_frame = pyqtSignal(ndarray)   # 224x224 face crop for downstream storage
    fps_signal = pyqtSignal(float)        # nominal capture rate reported by the camera

    def __init__(self):
        super().__init__()
        self.cap = cv2.VideoCapture(0)
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.face_detector = ftm.FaceDetector()
        self.running = True

    def run(self):
        while self.running:
            ret, frame = self.cap.read()
            if not ret:
                continue

            # OpenCV delivers BGR; convert to RGB before detection and display.
            rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            original_img, img, bboxs = self.face_detector.find_faces(rgb_image)

            # Keep the crop of the last detected face, if any.
            face_img = None
            for bbox in bboxs:
                try:
                    x, y, w, h, confidence = bbox
                    # Clamp to the image origin: detectors can report negative
                    # coordinates for faces touching the frame edge.
                    x, y = max(x, 0), max(y, 0)
                    face_img = img[y:y + h, x:x + w]
                except Exception as e:
                    print(e)

            self.new_frame.emit(original_img)

            # Only emit a face crop when one was found and the slice is non-empty.
            if face_img is not None and face_img.size > 0:
                self.fps_signal.emit(self.fps)
                self.storage_frame.emit(cv2.resize(face_img, (224, 224)))

    def stop(self):
        # Ask the loop to exit, wait for run() to return, then free the camera.
        self.running = False
        self.wait()
        self.cap.release()
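
# A minimal usage sketch (assumed, not part of the original module): it shows how the
# thread's signals could be wired to hypothetical slots on a PyQt5 main window. The
# slot names (update_preview, save_face_crop, update_fps_label) are placeholders.
#
#   from PyQt5.QtWidgets import QApplication, QMainWindow
#
#   app = QApplication([])
#   window = QMainWindow()          # hypothetical window exposing the slots below
#   camera = CameraThread()
#   camera.new_frame.connect(window.update_preview)      # assumed slot: show the full frame
#   camera.storage_frame.connect(window.save_face_crop)  # assumed slot: persist the 224x224 crop
#   camera.fps_signal.connect(window.update_fps_label)   # assumed slot: display capture FPS
#   camera.start()                  # QThread.start() runs run() on the worker thread
#   window.show()
#   app.exec_()
#   camera.stop()                   # stop the loop and release the camera on shutdown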