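"""Video comparison widgets.

VideoThread plays a reference ("standard") video in wall-clock sync with the
live webcam feed, runs pose detection on both streams, and reports annotated
frames, progress, and completion through Qt signals. VideoDisplayWidget shows
the two streams side by side with stop/restart controls and a progress bar.
"""
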
import time

import cv2
import numpy as np
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QProgressBar
from PyQt5.QtCore import Qt, QThread, pyqtSignal, QTimer, QMutex, QMutexLocker
from PyQt5.QtGui import QPixmap, QImage, QPainter, QPen


class VideoThread(QThread):
    """Worker thread that plays the reference video and the webcam feed in
    lock-step, runs pose detection on both, and reports results via signals."""

    frame_ready = pyqtSignal(np.ndarray, np.ndarray)  # standard_frame, webcam_frame
    progress_update = pyqtSignal(float, str)          # progress (0-1), status_text
    comparison_finished = pyqtSignal()

    def __init__(self, motion_app):
        super().__init__()
        self.motion_app = motion_app
        self.video_path = None
        self.is_running = False
        self.should_stop = False
        self.should_restart = False

    def load_video(self, video_path):
        self.video_path = video_path

    def start_comparison(self, video_path):
        self.video_path = video_path
        self.is_running = True
        self.should_stop = False
        self.should_restart = False
        self.start()

    def stop_comparison(self):
        self.should_stop = True
        self.is_running = False

    def restart_comparison(self):
        self.should_restart = True

    def run(self):
        if not self.video_path or not self.motion_app.body_detector:
            return

        # Open the reference ("standard") video
        standard_cap = cv2.VideoCapture(self.video_path)
        if not standard_cap.isOpened():
            return

        # Initialize the webcam
        if not self.motion_app.initialize_camera():
            standard_cap.release()
            return

        # Video properties; fall back to 30 FPS if the container reports none
        total_frames = int(standard_cap.get(cv2.CAP_PROP_FRAME_COUNT))
        video_fps = standard_cap.get(cv2.CAP_PROP_FPS)
        if video_fps == 0:
            video_fps = 30
        video_duration = total_frames / video_fps

        target_width, target_height = self.motion_app.get_display_resolution()

        # Start audio playback if the video's audio track can be loaded
        audio_loaded = self.motion_app.audio_player.load_audio(self.video_path)
        if audio_loaded:
            self.motion_app.audio_player.play()

        # Imported here (once, not per frame) so rtmlib is only required when a comparison runs
        from rtmlib import draw_skeleton

        start_time = time.time()
        frame_counter = 0

        while self.is_running and not self.should_stop:
            if self.should_restart:
                # Rewind the reference video and audio, then resume the loop
                standard_cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                start_time = time.time()
                if audio_loaded:
                    self.motion_app.audio_player.restart()
                self.should_restart = False
                continue

            elapsed_time = time.time() - start_time
            if elapsed_time >= video_duration:
                break

            # Seek the reference video to the frame matching wall-clock time, so
            # playback stays in sync even when processing is slower than the video FPS
            target_frame_idx = int(elapsed_time * video_fps)
            standard_cap.set(cv2.CAP_PROP_POS_FRAMES, target_frame_idx)
            ret_standard, standard_frame = standard_cap.read()
            if not ret_standard:
                continue

            # Grab the current webcam frame
            ret_webcam, webcam_frame = self.motion_app.read_camera_frame()
            if not ret_webcam or webcam_frame is None:
                continue

            # Pre-process frames: mirror the webcam and resize both to the display size
            frame_counter += 1
            webcam_frame = cv2.flip(webcam_frame, 1)
            standard_frame = cv2.resize(standard_frame, (target_width, target_height))
            webcam_frame = cv2.resize(webcam_frame, (target_width, target_height))

            # Pose detection on both frames
            try:
                standard_keypoints, standard_scores = self.motion_app.body_detector(standard_frame)
                webcam_keypoints, webcam_scores = self.motion_app.body_detector(webcam_frame)

                # Draw skeleton overlays
                standard_with_keypoints = draw_skeleton(
                    standard_frame.copy(), standard_keypoints, standard_scores,
                    openpose_skeleton=True, kpt_thr=0.43
                )
                webcam_with_keypoints = draw_skeleton(
                    webcam_frame.copy(), webcam_keypoints, webcam_scores,
                    openpose_skeleton=True, kpt_thr=0.43
                )

                # Emit annotated frames for display on the GUI thread
                self.frame_ready.emit(standard_with_keypoints, webcam_with_keypoints)

                # Hand the joint angles to the similarity analyzer (consumed in a separate thread)
                if standard_keypoints is not None and webcam_keypoints is not None:
                    standard_angles = self.motion_app.similarity_analyzer.extract_joint_angles(
                        standard_keypoints, standard_scores)
                    webcam_angles = self.motion_app.similarity_analyzer.extract_joint_angles(
                        webcam_keypoints, webcam_scores)

                    if standard_angles and webcam_angles:
                        try:
                            self.motion_app.pose_data_queue.put_nowait(
                                (elapsed_time, standard_angles, webcam_angles))
                        except Exception:
                            # Queue is full (consumer is lagging); drop this sample
                            pass

            except Exception:
                # Pose detection failed on this frame; skip it and keep going
                continue

            # Update progress roughly every 10 processed frames
            if frame_counter % 10 == 0:
                progress = min(elapsed_time / video_duration, 1.0)
                processing_fps = frame_counter / elapsed_time if elapsed_time > 0 else 0
                # Status text: elapsed / total time and processing frame rate
                status_text = f"时间: {elapsed_time:.1f}s / {video_duration:.1f}s | 处理帧率: {processing_fps:.1f} FPS"
                self.progress_update.emit(progress, status_text)

        # Cleanup: stop audio, release the video, and notify the GUI
        if audio_loaded:
            self.motion_app.audio_player.stop()
        standard_cap.release()

        self.comparison_finished.emit()
        self.is_running = False


class VideoDisplayWidget(QWidget):
    """Side-by-side display of the reference video and the webcam feed,
    with playback controls and a progress bar."""

    def __init__(self, motion_app):
        super().__init__()
        self.motion_app = motion_app
        self.video_thread = VideoThread(motion_app)
        self.setup_ui()
        self.connect_signals()

    def setup_ui(self):
        layout = QVBoxLayout(self)

        # Title ("Video Comparison")
        title_label = QLabel("📺 视频比较")
        title_label.setAlignment(Qt.AlignCenter)
        title_label.setStyleSheet("font-size: 16pt; font-weight: bold; margin: 10px;")
        layout.addWidget(title_label)

        # Video displays, side by side
        video_layout = QHBoxLayout()

        # Standard (reference) video
        standard_layout = QVBoxLayout()
        standard_title = QLabel("🎯 标准动作视频")  # "Reference motion video"
        standard_title.setAlignment(Qt.AlignCenter)
        standard_title.setStyleSheet("font-weight: bold; margin: 5px;")
        self.standard_label = QLabel()
        self.standard_label.setAlignment(Qt.AlignCenter)
        self.standard_label.setMinimumSize(480, 360)
        self.standard_label.setStyleSheet("border: 1px solid #ccc; background-color: #f0f0f0;")
        self.standard_label.setText("等待视频...")  # "Waiting for video..."

        standard_layout.addWidget(standard_title)
        standard_layout.addWidget(self.standard_label)

        # Webcam video
        webcam_layout = QVBoxLayout()
        webcam_title = QLabel("📹 实时影像")  # "Live camera feed"
        webcam_title.setAlignment(Qt.AlignCenter)
        webcam_title.setStyleSheet("font-weight: bold; margin: 5px;")
        self.webcam_label = QLabel()
        self.webcam_label.setAlignment(Qt.AlignCenter)
        self.webcam_label.setMinimumSize(480, 360)
        self.webcam_label.setStyleSheet("border: 1px solid #ccc; background-color: #f0f0f0;")
        self.webcam_label.setText("等待摄像头...")  # "Waiting for camera..."

        webcam_layout.addWidget(webcam_title)
        webcam_layout.addWidget(self.webcam_label)

        video_layout.addLayout(standard_layout)
        video_layout.addLayout(webcam_layout)
        layout.addLayout(video_layout)

        # Control buttons: Stop / Restart
        button_layout = QHBoxLayout()
        self.stop_button = QPushButton("⏹️ 停止")        # "Stop"
        self.restart_button = QPushButton("🔄 重新开始")  # "Restart"

        self.stop_button.clicked.connect(self.stop_comparison)
        self.restart_button.clicked.connect(self.restart_comparison)

        self.stop_button.setEnabled(False)
        self.restart_button.setEnabled(False)

        button_layout.addWidget(self.stop_button)
        button_layout.addWidget(self.restart_button)
        layout.addLayout(button_layout)

        # Progress bar and status line
        self.progress_bar = QProgressBar()
        self.status_label = QLabel("准备就绪")  # "Ready"

        layout.addWidget(self.progress_bar)
        layout.addWidget(self.status_label)

    def connect_signals(self):
        self.video_thread.frame_ready.connect(self.update_frames)
        self.video_thread.progress_update.connect(self.update_progress)
        self.video_thread.comparison_finished.connect(self.on_comparison_finished)

    def load_video(self, video_path):
        self.video_thread.load_video(video_path)

    def start_comparison(self, video_path):
        self.stop_button.setEnabled(True)
        self.restart_button.setEnabled(True)
        self.video_thread.start_comparison(video_path)

    def stop_comparison(self):
        self.video_thread.stop_comparison()
        self.stop_button.setEnabled(False)
        self.restart_button.setEnabled(False)

    def restart_comparison(self):
        self.video_thread.restart_comparison()

    def update_frames(self, standard_frame, webcam_frame):
        # Convert frames to QPixmap and display them, preserving aspect ratio
        standard_pixmap = self.numpy_to_pixmap(standard_frame)
        webcam_pixmap = self.numpy_to_pixmap(webcam_frame)

        self.standard_label.setPixmap(standard_pixmap.scaled(
            self.standard_label.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
        self.webcam_label.setPixmap(webcam_pixmap.scaled(
            self.webcam_label.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))

    def update_progress(self, progress, status_text):
        self.progress_bar.setValue(int(progress * 100))
        self.status_label.setText(status_text)

    def on_comparison_finished(self):
        self.stop_button.setEnabled(False)
        self.restart_button.setEnabled(False)
        self.status_label.setText("比较完成")  # "Comparison finished"

    def numpy_to_pixmap(self, frame):
        """Convert a numpy array (BGR) to a QPixmap."""
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_frame.shape
        bytes_per_line = ch * w
        # QImage wraps the numpy buffer without copying; QPixmap.fromImage makes its own copy
        qt_image = QImage(rgb_frame.data, w, h, bytes_per_line, QImage.Format_RGB888)
        return QPixmap.fromImage(qt_image)

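
# ---------------------------------------------------------------------------
# Illustrative usage (a minimal sketch, not part of the original module).
# It assumes a `MotionApp`-style object exposing the attributes used above:
# body_detector, initialize_camera(), read_camera_frame(),
# get_display_resolution(), audio_player, similarity_analyzer, and
# pose_data_queue. The import path `motion_app` and the video filename are
# placeholders and will differ in the actual project.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import sys

    from PyQt5.QtWidgets import QApplication

    from motion_app import MotionApp  # hypothetical import path

    app = QApplication(sys.argv)

    widget = VideoDisplayWidget(MotionApp())
    widget.show()

    # Kick off a comparison against a reference clip (placeholder path)
    widget.start_comparison("standard_motion.mp4")

    sys.exit(app.exec_())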