Compare commits
7 Commits
826e545652
...
webrtc
| Author | SHA1 | Date | |
|---|---|---|---|
| 417e50983e | |||
| 96335af6ee | |||
| 877dd8ca70 | |||
| 1300c41172 | |||
| cffc59a285 | |||
| 7752eaaf9d | |||
| c227beeaca |
300
mucapy/AboutWindow.py
Normal file
300
mucapy/AboutWindow.py
Normal file
@@ -0,0 +1,300 @@
|
||||
from PyQt5.QtCore import Qt, QTimer, QDir, QSize, QDateTime, QRect, QThread, pyqtSignal, QMutex, QObject, QEvent
|
||||
from PyQt5.QtGui import (QImage, QPixmap, QIcon, QColor, QKeySequence, QPainter,
|
||||
QPen, QBrush)
|
||||
from PyQt5.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QHBoxLayout,
|
||||
QWidget, QLabel, QPushButton, QComboBox, QSpinBox,
|
||||
QFileDialog, QMessageBox, QMenu, QAction, QActionGroup, QGridLayout, QGroupBox,
|
||||
QDockWidget, QScrollArea, QToolButton, QDialog,
|
||||
QShortcut, QListWidget, QFormLayout, QLineEdit,
|
||||
QCheckBox, QTabWidget, QListWidgetItem, QSplitter,
|
||||
QProgressBar, QSizePolicy)
|
||||
import todopackage.todo as todo
|
||||
from utility import getpath
|
||||
import cv2
|
||||
import sys
|
||||
import psutil
|
||||
import numpy as np
|
||||
import requests
|
||||
from initqt import initQT
|
||||
|
||||
class AboutWindow(QDialog):
    """Modal "About" dialog for the Multi-Camera YOLO Detection app.

    Shows the app icon/title/version, a collapsible colour-coded system
    information section (Python/OpenCV/NumPy/Requests versions, CUDA
    availability, CPU and RAM stats), and four text sections (info, TODO,
    TODO archive, cameras) sourced from the project ``todo`` module.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle("About Multi-Camera YOLO Detection")
        self.setWindowIcon(QIcon.fromTheme("help-about"))
        self.resize(450, 420)

        self.setWindowModality(Qt.ApplicationModal)
        # Remove the "?" context-help button from the title bar.
        self.setWindowFlags(self.windowFlags() & ~Qt.WindowContextHelpButtonHint)

        layout = QVBoxLayout()
        layout.setAlignment(Qt.AlignTop)
        layout.setSpacing(20)

        # App icon
        icon_label = QLabel()
        icon_label.setPixmap(QIcon.fromTheme("camera-web").pixmap(64, 64))
        icon_label.setAlignment(Qt.AlignCenter)
        layout.addWidget(icon_label)

        # Title
        title_label = QLabel("PySec")
        title_label.setStyleSheet("font-size: 18px; font-weight: bold;")
        title_label.setAlignment(Qt.AlignCenter)
        layout.addWidget(title_label)

        # Version label
        version_label = QLabel("Version 1.0")
        version_label.setAlignment(Qt.AlignCenter)
        layout.addWidget(version_label)

        # Gather system info; only "important" keys are visible until the
        # group box is expanded via the triangle button.
        info = self.get_system_info()
        self.important_keys = ["Python", "OpenCV", "Memory", "CUDA"]
        self.full_labels = {}

        # === System Info Group ===
        self.sysinfo_box = QGroupBox()
        sysinfo_main_layout = QVBoxLayout()
        sysinfo_main_layout.setContentsMargins(8, 8, 8, 8)

        # Header layout: title + expand/collapse triangle button
        header_layout = QHBoxLayout()
        header_label = QLabel("System Information")
        header_label.setStyleSheet("font-weight: bold;")
        header_layout.addWidget(header_label)
        header_layout.addStretch()

        self.toggle_btn = QToolButton()
        self.toggle_btn.setText("▶")
        self.toggle_btn.setCheckable(True)
        self.toggle_btn.setChecked(False)
        # Optional external stylesheet; keep widget defaults if missing.
        toggle_btn_style = getpath.resource_path("styling/togglebtnabout.qss")
        try:
            with open(toggle_btn_style, "r") as tgbstyle:
                self.toggle_btn.setStyleSheet(tgbstyle.read())
        except FileNotFoundError:
            pass

        self.toggle_btn.toggled.connect(self.toggle_expand)
        header_layout.addWidget(self.toggle_btn)

        sysinfo_main_layout.addLayout(header_layout)

        # Details layout
        self.sysinfo_layout = QVBoxLayout()
        self.sysinfo_layout.setSpacing(5)

        for key, value in info.items():
            # MemoryGB is an internal numeric helper value, not for display.
            if key == "MemoryGB":
                continue

            label = QLabel(f"{key}: {value}")
            self.style_label(label, key, value)
            self.sysinfo_layout.addWidget(label)
            self.full_labels[key] = label

            # Hide non-essential rows until the section is expanded.
            if key not in self.important_keys:
                label.setVisible(False)

        sysinfo_main_layout.addLayout(self.sysinfo_layout)
        self.sysinfo_box.setLayout(sysinfo_main_layout)
        layout.addWidget(self.sysinfo_box)

        # Close button
        close_btn = QPushButton("Close")
        close_btn.clicked.connect(self.accept)
        close_btn.setFixedWidth(100)
        layout.addWidget(close_btn, alignment=Qt.AlignCenter)

        # Dialog-wide stylesheet (optional).
        style_file = getpath.resource_path("styling/about.qss")
        try:
            with open(style_file, "r") as aboutstyle:
                self.setStyleSheet(aboutstyle.read())
        except FileNotFoundError:
            pass

        self.setLayout(layout)

        # --- TODO section ---
        self.todo_obj = todo
        todo_text = self.get_todo_text()
        todo_label = QLabel(f"<pre>{todo_text}</pre>")
        todo_label.setWordWrap(True)
        todo_label.setAlignment(Qt.AlignLeft)

        # The optional todostyle.qss used to be loaded and then immediately
        # overridden by the hard-coded style below, so only its absence is
        # reported; the fixed style is now applied unconditionally.
        todo_style_path = getpath.resource_path("styling/todostyle.qss")
        try:
            with open(todo_style_path, "r"):
                pass  # presence check only; hard-coded style below wins
        except FileNotFoundError:
            print(f"Missing a Style File! => {todo_style_path}")
        todo_label.setStyleSheet("color: #f7ef02; font-style: italic;")

        # --- TODO archive section ---
        self.todo_archive_object = todo
        todo_archive_text = self.get_archive_text()
        todo_archive_label = QLabel(f"<pre>{todo_archive_text}</pre>")
        todo_archive_label.setWordWrap(True)
        todo_archive_label.setAlignment(Qt.AlignLeft)
        todo_archive_label.setStyleSheet("color: #02d1fa ;font-style: italic;")

        # --- Info section ---
        self.info_obj = todo
        info_text = self.get_info_text()
        info_label = QLabel(f"<pre>{info_text}</pre>")
        info_label.setWordWrap(True)
        info_label.setAlignment(Qt.AlignCenter)
        info_label.setStyleSheet("color: #2ecc71 ; font-style: italic;")

        # --- Cameras section ---
        self.camobj = todo
        cam_text = self.get_cam_text()
        cam_label = QLabel(f"<pre>{cam_text}</pre>")
        cam_label.setWordWrap(True)
        cam_label.setAlignment(Qt.AlignCenter)
        cam_label.setStyleSheet("color: #ffffff; font-style: italic;")

        # Display order: info, todo, archive, cameras.
        layout.addWidget(info_label)
        layout.addWidget(todo_label)
        layout.addWidget(todo_archive_label)
        layout.addWidget(cam_label)

    def toggle_expand(self, checked):
        """Show/hide the non-essential system-info rows and flip the arrow."""
        for key, label in self.full_labels.items():
            if key not in self.important_keys:
                label.setVisible(checked)
        self.toggle_btn.setText("▼" if checked else "▶")

    def style_label(self, label, key, value):
        """Colour a system-info label according to its key and value."""
        if key == "Python":
            label.setStyleSheet("color: #7FDBFF;")
        elif key == "OpenCV":
            label.setStyleSheet("color: #FF851B;")
        elif key == "CUDA":
            label.setStyleSheet("color: green;" if value == "Yes" else "color: red;")
        elif key == "NumPy":
            label.setStyleSheet("color: #B10DC9;")
        elif key == "Requests":
            label.setStyleSheet("color: #0074D9;")
        elif key == "Memory":
            # Value is formatted like "16 GB RAM"; colour by the number.
            try:
                ram = int(value.split()[0])
                if ram < 8:
                    label.setStyleSheet("color: red;")
                elif ram < 16:
                    label.setStyleSheet("color: yellow;")
                elif ram < 32:
                    label.setStyleSheet("color: lightgreen;")
                else:
                    label.setStyleSheet("color: #90EE90;")
            except (ValueError, IndexError, AttributeError):
                label.setStyleSheet("color: gray;")
        elif key == "CPU Usage":
            # Value is formatted like "42.0%".
            try:
                usage = float(value.strip('%'))
                if usage > 80:
                    label.setStyleSheet("color: red;")
                elif usage > 50:
                    label.setStyleSheet("color: yellow;")
                else:
                    label.setStyleSheet("color: lightgreen;")
            except (ValueError, AttributeError):
                label.setStyleSheet("color: gray;")
        elif key in ("CPU Cores", "Logical CPUs"):
            label.setStyleSheet("color: lightgreen;")
        elif key in ("CPU", "Architecture", "OS"):
            label.setStyleSheet("color: lightgray;")
        else:
            label.setStyleSheet("color: #DDD;")

    def get_system_info(self):
        """Collect library versions, OS details and CPU/RAM stats.

        Returns:
            dict: display-key -> value mapping; ``MemoryGB`` is an internal
            numeric helper that callers should skip when rendering.
        """
        import platform
        # Fix: subprocess was referenced below but never imported, so CPU
        # detection always fell into the "Unknown" branch on Linux/macOS.
        import subprocess

        info = {}
        info['Python'] = sys.version.split()[0]
        info['OS'] = f"{platform.system()} {platform.release()}"
        info['Architecture'] = platform.machine()
        info['OpenCV'] = cv2.__version__
        info['CUDA'] = "Yes" if cv2.cuda.getCudaEnabledDeviceCount() > 0 else "No"
        info['NumPy'] = np.__version__
        info['Requests'] = requests.__version__

        # On Linux, also show the Qt platform environment (X11/Wayland).
        # Key spelling fixed ("ENVIROMENT" -> "ENVIRONMENT", stray space
        # removed); the key is only used as a display label in this dialog.
        if platform.system() == "Linux":
            info["XDG_ENVIRONMENT_TYPE"] = initQT.getenv(self)

        mem = psutil.virtual_memory()
        info['MemoryGB'] = mem.total // (1024 ** 3)
        info['Memory'] = f"{info['MemoryGB']} GB RAM"

        info['CPU Cores'] = psutil.cpu_count(logical=False)
        info['Logical CPUs'] = psutil.cpu_count(logical=True)
        info['CPU Usage'] = f"{psutil.cpu_percent()}%"

        try:
            if sys.platform == "win32":
                info['CPU'] = platform.processor()
            elif sys.platform == "linux":
                info['CPU'] = subprocess.check_output("lscpu", shell=True).decode().split("\n")[0]
            elif sys.platform == "darwin":
                info['CPU'] = subprocess.check_output(["sysctl", "-n", "machdep.cpu.brand_string"]).decode().strip()
        except Exception:
            info['CPU'] = "Unknown"

        return info

    def get_todo_text(self):
        """Return the TODO text from the todo module, or an error message."""
        try:
            todo_text = self.todo_obj.todo.gettodo()
            if isinstance(todo_text, str):
                return todo_text.strip()
            return "Invalid TODO format."
        except Exception as e:
            return f"Error retrieving TODO: {e}"

    def get_info_text(self):
        """Return the info text from the todo module, or an error message."""
        try:
            info_text = self.info_obj.todo.getinfo()
            if isinstance(info_text, str):
                return info_text.strip()
            return "Invalid info format."
        except Exception as e:
            # Fixed: previous message contained profanity.
            return f"Error retrieving info: {e}"

    def get_archive_text(self):
        """Return the TODO-archive text, or an error message."""
        try:
            todo_archive_text = self.todo_archive_object.todo.getarchive()
            if isinstance(todo_archive_text, str):
                return todo_archive_text.strip()
            return "Invalid archive format."
        except Exception as e:
            # Fixed: missing f-prefix meant the literal "{e}" was returned.
            return f"Error retrieving archive: {e}"

    def get_cam_text(self):
        """Return the camera-list text, or an error message."""
        try:
            cam_text = self.camobj.todo.getcams()
            if isinstance(cam_text, str):
                return cam_text.strip()
            return "Invalid camera format."
        except Exception as e:
            # Fixed: previous message contained profanity.
            return f"Error retrieving cameras: {e}"
|
||||
179
mucapy/AlertWorker.py
Normal file
179
mucapy/AlertWorker.py
Normal file
@@ -0,0 +1,179 @@
|
||||
import shutil
|
||||
import wave
|
||||
try:
|
||||
import simpleaudio as sa
|
||||
except ImportError:
|
||||
sa = None
|
||||
sa = None # Force it to not use it cause it fucks stuff up
|
||||
import os
|
||||
import subprocess
|
||||
import time
|
||||
import sys
|
||||
from PyQt5.QtCore import QThread, pyqtSignal
|
||||
|
||||
|
||||
class AlertWorker(QThread):
    """Worker thread to play an alert sound safely without blocking UI.

    Uses winsound on Windows, external system players on Unix
    (afplay/paplay/aplay/ffplay), and falls back to simpleaudio if
    available. Supports cooperative stop via :meth:`stop`.
    """

    # How many times the alert WAV is repeated per run.
    REPEAT_COUNT = 4

    finished = pyqtSignal(bool, str)  # success, message

    def __init__(self, wav_path: str, parent=None):
        super().__init__(parent)
        self.wav_path = wav_path
        self._stop = False       # cooperative stop flag, set from stop()
        self._subproc = None     # external player process, when in use
        self._play_obj = None    # simpleaudio play object, when in use

    def stop(self):
        """Request the worker to stop early and halt any active playback."""
        try:
            self._stop = True
            if self._play_obj is not None:
                try:
                    self._play_obj.stop()
                except Exception:
                    pass
            if self._subproc is not None:
                try:
                    self._subproc.terminate()
                except Exception:
                    pass
        except Exception:
            # stop() is called from the GUI thread and must never raise.
            pass

    def _find_unix_player(self):
        """Return (cmd_list, name) for an available player on Unix or (None, None)."""
        try:
            if sys.platform.startswith('darwin'):
                if shutil.which('afplay'):
                    return (['afplay'], 'afplay')
            # Linux and others
            if shutil.which('paplay'):
                return (['paplay'], 'paplay')
            if shutil.which('aplay'):
                return (['aplay', '-q'], 'aplay')
            if shutil.which('ffplay'):
                return (['ffplay', '-nodisp', '-autoexit', '-loglevel', 'error'], 'ffplay')
        except Exception:
            pass
        return (None, None)

    def _play_with_simpleaudio(self):
        """Play the WAV ``REPEAT_COUNT`` times via simpleaudio.

        Extracted helper: this logic was previously duplicated verbatim in
        the Windows-fallback and Unix-fallback paths of :meth:`run`.

        Returns:
            tuple[bool, str | None]: (success, error message or None).
        """
        try:
            with wave.open(self.wav_path, 'rb') as wf:
                # Clamp header values defensively against malformed WAVs.
                n_channels = max(1, wf.getnchannels())
                sampwidth = max(1, wf.getsampwidth())
                framerate = max(8000, wf.getframerate() or 44100)
                frames = wf.readframes(wf.getnframes())
            for _ in range(self.REPEAT_COUNT):
                if self._stop:
                    break
                self._play_obj = sa.play_buffer(frames, n_channels, sampwidth, framerate)
                self._play_obj.wait_done()
                time.sleep(0.002)
            return True, None
        except Exception as e:
            return False, str(e)

    def run(self):
        """Thread entry point: pick an audio backend and play the alert.

        Emits ``finished(success, message)`` exactly once.
        """
        try:
            if not os.path.exists(self.wav_path):
                self.finished.emit(False, f"File not found: {self.wav_path}")
                return

            # Windows path: prefer winsound (native, safe)
            if sys.platform.startswith('win'):
                ws_error = "unknown"
                try:
                    import winsound as _ws  # type: ignore
                    # Resolve flags safely even if some attributes are missing
                    SND_FILENAME = getattr(_ws, 'SND_FILENAME', 0x00020000)
                    SND_SYNC = getattr(_ws, 'SND_SYNC', 0x0000)  # 0 is synchronous by default
                    flags = SND_FILENAME | SND_SYNC
                    # Ensure PlaySound exists
                    play_fn = getattr(_ws, 'PlaySound', None)
                    if play_fn is None:
                        raise RuntimeError('winsound.PlaySound not available')
                    for _ in range(self.REPEAT_COUNT):
                        if self._stop:
                            break
                        try:
                            play_fn(self.wav_path, flags)
                        except Exception as e:
                            # On failure, break to try alternative backends
                            ws_error = str(e)
                            break
                        time.sleep(0.001)
                    else:
                        # for/else: completed every repetition without break.
                        self.finished.emit(True, "Alert played")
                        return
                    # If here, winsound failed or was stopped; fall through.
                except Exception as e:
                    ws_error = str(e)

                # Try simpleaudio on Windows as fallback
                if sa is not None:
                    ok, err = self._play_with_simpleaudio()
                    if ok:
                        self.finished.emit(True, "Alert played")
                    else:
                        self.finished.emit(False, f"Playback error (winsound fallback -> simpleaudio): {err}")
                    return
                self.finished.emit(False, f"Audio backend not available (winsound failed: {ws_error})")
                return

            # Non-Windows: try external players first
            cmd, name = self._find_unix_player()
            if cmd is not None:
                for _ in range(self.REPEAT_COUNT):
                    if self._stop:
                        break
                    try:
                        self._subproc = subprocess.Popen(cmd + [self.wav_path],
                                                         stdout=subprocess.DEVNULL,
                                                         stderr=subprocess.DEVNULL)
                        # Poll until done or stop requested
                        while True:
                            if self._stop:
                                try:
                                    self._subproc.terminate()
                                except Exception:
                                    pass
                                break
                            if self._subproc.poll() is not None:
                                break
                            time.sleep(0.01)
                    except Exception:
                        # Player failed to launch; fall through to simpleaudio.
                        cmd = None
                        break
                    finally:
                        self._subproc = None
                    time.sleep(0.002)
                if cmd is not None:
                    self.finished.emit(True, "Alert played")
                    return

            # Fallback: simpleaudio if available
            if sa is not None:
                ok, err = self._play_with_simpleaudio()
                if ok:
                    self.finished.emit(True, "Alert played")
                else:
                    self.finished.emit(False, f"Playback error (simpleaudio): {err}")
                return

            self.finished.emit(False, "No audio backend available (afplay/paplay/aplay/ffplay/simpleaudio)")
        except Exception as e:
            try:
                self.finished.emit(False, str(e))
            except Exception:
                pass
|
||||
127
mucapy/CameraDisplay.py
Normal file
127
mucapy/CameraDisplay.py
Normal file
@@ -0,0 +1,127 @@
|
||||
from PyQt5.QtCore import Qt, QDateTime, QRect
|
||||
from PyQt5.QtGui import (QColor, QPainter,
|
||||
QPen, QBrush)
|
||||
from PyQt5.QtWidgets import (QApplication, QLabel, QFileDialog, QMessageBox)
|
||||
from utility import getpath
|
||||
from Config import Config
|
||||
from PopoutWindow import PopoutWindow
|
||||
import os
|
||||
|
||||
class CameraDisplay(QLabel):
    """Custom QLabel for displaying a camera feed with fullscreen support.

    Adds a camera-name overlay (see :meth:`paintEvent`), PNG screenshots of
    the current frame, and a double-click popout window.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setAlignment(Qt.AlignCenter)
        self.setText("No camera feed")

        # Optional stylesheet; keep widget defaults if the file is missing.
        self.get_camera_display_style = getpath.resource_path("styling/camera_display.qss")
        try:
            with open(self.get_camera_display_style, "r") as cdst:
                self.setStyleSheet(cdst.read())
        except FileNotFoundError:
            pass

        self.setMinimumSize(320, 240)
        self.fullscreen_window = None   # PopoutWindow instance when popped out
        self.cam_id = None              # identifier supplied via set_cam_id()
        self.fullscreen_timer = None
        self.config = Config()
        self.screenshot_dir = self.config.load_setting('screenshot_dir', os.path.expanduser('~/Pictures/MuCaPy'))
        self.camera_name = None         # overlay text drawn in paintEvent()

        # Create screenshot directory if it doesn't exist
        if not os.path.exists(self.screenshot_dir):
            os.makedirs(self.screenshot_dir, exist_ok=True)

    def set_cam_id(self, cam_id):
        """Set camera identifier for this display."""
        self.cam_id = cam_id

    def set_camera_name(self, name):
        """Set the camera name for display and repaint the overlay."""
        self.camera_name = name
        self.update()

    def take_screenshot(self):
        """Save the current frame as a timestamped PNG.

        Prompts for a target directory if none is configured; shows a
        message box reporting success or failure.
        """
        if not self.pixmap():
            return

        # Ask for screenshot directory if not set
        if not self.screenshot_dir:
            dir_path = QFileDialog.getExistingDirectory(
                self,
                "Select Screenshot Directory",
                os.path.expanduser('~/Pictures'),
                QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks
            )
            if dir_path:
                self.screenshot_dir = dir_path
                self.config.save_setting('screenshot_dir', dir_path)
                if not os.path.exists(dir_path):
                    os.makedirs(dir_path, exist_ok=True)
            else:
                return

        # Generate filename with timestamp
        timestamp = QDateTime.currentDateTime().toString('yyyy-MM-dd_hh-mm-ss')
        filename = f"camera_{self.cam_id}_{timestamp}.png"
        filepath = os.path.join(self.screenshot_dir, filename)

        # Save the image
        if self.pixmap().save(filepath):
            QMessageBox.information(self, "Success", f"Screenshot saved to:\n{filepath}")
        else:
            QMessageBox.critical(self, "Error", "Failed to save screenshot")

    def mouseDoubleClickEvent(self, event):
        """Handle double click to toggle the popout/fullscreen window."""
        if self.pixmap() and not self.fullscreen_window:
            self.show_fullscreen()
        elif self.fullscreen_window:
            self.close_fullscreen()

    def show_fullscreen(self):
        """Show this camera in a new window (enhanced popout)."""
        if not self.pixmap():
            return
        # Create enhanced popout window
        self.fullscreen_window = PopoutWindow(self, cam_id=self.cam_id, parent=self.window())
        # Size to at most 1280x720, capped at 90% of the available screen.
        screen = QApplication.primaryScreen().availableGeometry()
        self.fullscreen_window.resize(min(1280, int(screen.width() * 0.9)),
                                      min(720, int(screen.height() * 0.9)))
        self.fullscreen_window.show()
        # ESC shortcut already handled inside PopoutWindow

    def update_fullscreen(self, label):
        """Kept for backward compatibility; PopoutWindow manages its own refresh."""
        if self.pixmap():
            label.setPixmap(self.pixmap().scaled(
                label.size(),
                Qt.KeepAspectRatio,
                Qt.SmoothTransformation
            ))

    def close_fullscreen(self):
        """Close the fullscreen window."""
        if self.fullscreen_window:
            self.fullscreen_window.close()
            self.fullscreen_window = None

    def paintEvent(self, event):
        """Override paint event to draw the camera-name overlay."""
        super().paintEvent(event)
        if self.camera_name and self.pixmap():
            painter = QPainter(self)
            painter.setRenderHint(QPainter.Antialiasing)

            # QFontMetrics.width() is deprecated since Qt 5.11; prefer
            # horizontalAdvance() but keep a fallback for older Qt builds.
            fm = painter.fontMetrics()
            if hasattr(fm, "horizontalAdvance"):
                text_width = fm.horizontalAdvance(self.camera_name)
            else:
                text_width = fm.width(self.camera_name)

            # Draw semi-transparent background
            painter.setPen(Qt.NoPen)
            painter.setBrush(QBrush(QColor(0, 0, 0, 180)))
            rect = QRect(10, 10, text_width + 20, 30)
            painter.drawRoundedRect(rect, 5, 5)

            # Draw text
            painter.setPen(QPen(QColor(255, 255, 255)))
            painter.drawText(rect, Qt.AlignCenter, self.camera_name)
||||
27
mucapy/CameraScanThread.py
Normal file
27
mucapy/CameraScanThread.py
Normal file
@@ -0,0 +1,27 @@
|
||||
import sys
|
||||
|
||||
from PyQt5.QtCore import QThread, pyqtSignal
|
||||
|
||||
|
||||
class CameraScanThread(QThread):
    """Background thread that scans for attached cameras.

    Runs ``detector.scan_for_cameras`` off the GUI thread and, on Windows,
    additionally resolves human-readable camera names. Always emits
    ``scan_finished(cameras, names)`` — with empty results on failure.
    """

    scan_finished = pyqtSignal(list, dict)

    def __init__(self, detector, max_to_check=10, parent=None):
        super().__init__(parent)
        self.detector = detector
        self.max_to_check = max_to_check

    def run(self):
        """Perform the scan and emit the results exactly once."""
        cameras, display_names = [], {}
        try:
            cameras = self.detector.scan_for_cameras(self.max_to_check)
            # Friendly device names are only resolvable on Windows.
            if sys.platform.startswith('win'):
                try:
                    display_names = self.detector.get_camera_names_windows(cameras)
                except Exception as e:
                    print(f"Failed to get Windows camera names: {e}")
                    display_names = {}
        except Exception as e:
            print(f"CameraScanThread error: {e}")
            cameras, display_names = [], {}
        self.scan_finished.emit(cameras, display_names)
||||
317
mucapy/CameraSelectorDialog.py
Normal file
317
mucapy/CameraSelectorDialog.py
Normal file
@@ -0,0 +1,317 @@
|
||||
from PyQt5.QtCore import Qt, QTimer, QDir, QSize, QDateTime, QRect, QThread, pyqtSignal, QMutex, QObject, QEvent
|
||||
from PyQt5.QtGui import (QImage, QPixmap, QIcon, QColor, QKeySequence, QPainter,
|
||||
QPen, QBrush)
|
||||
from PyQt5.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QHBoxLayout,
|
||||
QWidget, QLabel, QPushButton, QComboBox, QSpinBox,
|
||||
QFileDialog, QMessageBox, QMenu, QAction, QActionGroup, QGridLayout, QGroupBox,
|
||||
QDockWidget, QScrollArea, QToolButton, QDialog,
|
||||
QShortcut, QListWidget, QFormLayout, QLineEdit,
|
||||
QCheckBox, QTabWidget, QListWidgetItem, QSplitter,
|
||||
QProgressBar, QSizePolicy)
|
||||
import NetworkCameraDialog
|
||||
from todopackage.todo import todo
|
||||
import os
|
||||
|
||||
class CameraSelectorDialog(QDialog):
|
||||
def __init__(self, parent=None):
|
||||
super().__init__(parent)
|
||||
self.setWindowTitle("Camera Selector")
|
||||
self.setModal(True)
|
||||
self.resize(900, 650) # Increased size for better visibility
|
||||
self.setSizeGripEnabled(True)
|
||||
|
||||
self.detector = parent.detector if parent else None
|
||||
self.selected_cameras = []
|
||||
|
||||
# Main layout
|
||||
layout = QVBoxLayout(self)
|
||||
|
||||
# Instructions with better formatting
|
||||
instructions = QLabel(todo.get_instructions_CaSeDi_QLabel())
|
||||
print(todo.get_instructions_CaSeDi_QLabel())
|
||||
|
||||
instructions.setStyleSheet("QLabel { background-color: #2A2A2A; padding: 10px; border-radius: 4px; }")
|
||||
instructions.setWordWrap(True)
|
||||
layout.addWidget(instructions)
|
||||
|
||||
# Split view for cameras
|
||||
splitter = QSplitter(Qt.Horizontal)
|
||||
splitter.setChildrenCollapsible(False)
|
||||
splitter.setHandleWidth(6)
|
||||
|
||||
# Left side - Available Cameras
|
||||
left_widget = QWidget()
|
||||
left_layout = QVBoxLayout(left_widget)
|
||||
left_widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
|
||||
|
||||
# Local Cameras Group
|
||||
local_group = QGroupBox("Local Cameras")
|
||||
local_group.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
|
||||
local_layout = QVBoxLayout()
|
||||
self.local_list = QListWidget()
|
||||
self.local_list.setSelectionMode(QListWidget.ExtendedSelection)
|
||||
self.local_list.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
|
||||
local_layout.addWidget(self.local_list)
|
||||
local_group.setLayout(local_layout)
|
||||
left_layout.addWidget(local_group)
|
||||
|
||||
# Network Cameras Group
|
||||
network_group = QGroupBox("Network Cameras")
|
||||
network_group.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
|
||||
network_layout = QVBoxLayout()
|
||||
self.network_list = QListWidget()
|
||||
self.network_list.setSelectionMode(QListWidget.ExtendedSelection)
|
||||
self.network_list.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
|
||||
network_layout.addWidget(self.network_list)
|
||||
network_group.setLayout(network_layout)
|
||||
left_layout.addWidget(network_group)
|
||||
|
||||
# Camera management buttons
|
||||
btn_layout = QHBoxLayout()
|
||||
self.refresh_btn = QPushButton("Refresh")
|
||||
self.refresh_btn.clicked.connect(self.refresh_cameras)
|
||||
add_net_btn = QPushButton("Add Network Camera")
|
||||
add_net_btn.clicked.connect(self.show_network_dialog)
|
||||
|
||||
btn_layout.addWidget(self.refresh_btn)
|
||||
btn_layout.addWidget(add_net_btn)
|
||||
left_layout.addLayout(btn_layout)
|
||||
|
||||
# Make lists expand and buttons stay minimal in left pane
|
||||
left_layout.setStretch(0, 1)
|
||||
left_layout.setStretch(1, 1)
|
||||
left_layout.setStretch(2, 0)
|
||||
|
||||
splitter.addWidget(left_widget)
|
||||
splitter.setStretchFactor(0, 1)
|
||||
|
||||
# Right side - Selected Cameras Preview
|
||||
right_widget = QWidget()
|
||||
right_layout = QVBoxLayout(right_widget)
|
||||
right_widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
|
||||
|
||||
preview_label = QLabel("Selected Cameras Preview")
|
||||
preview_label.setStyleSheet("font-weight: bold;")
|
||||
right_layout.addWidget(preview_label)
|
||||
|
||||
self.preview_list = QListWidget()
|
||||
self.preview_list.setDragDropMode(QListWidget.InternalMove)
|
||||
self.preview_list.setSelectionMode(QListWidget.ExtendedSelection)
|
||||
self.preview_list.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
|
||||
right_layout.addWidget(self.preview_list)
|
||||
|
||||
# Preview controls
|
||||
preview_btn_layout = QHBoxLayout()
|
||||
remove_btn = QPushButton("Remove Selected")
|
||||
remove_btn.clicked.connect(self.remove_selected)
|
||||
clear_btn = QPushButton("Clear All")
|
||||
clear_btn.clicked.connect(self.clear_selection)
|
||||
|
||||
preview_btn_layout.addWidget(remove_btn)
|
||||
preview_btn_layout.addWidget(clear_btn)
|
||||
right_layout.addLayout(preview_btn_layout)
|
||||
|
||||
# Make preview list expand and buttons stay minimal in right pane
|
||||
right_layout.setStretch(0, 0)
|
||||
right_layout.setStretch(1, 1)
|
||||
right_layout.setStretch(2, 0)
|
||||
|
||||
splitter.addWidget(right_widget)
|
||||
splitter.setStretchFactor(1, 1)
|
||||
layout.addWidget(splitter)
|
||||
|
||||
# Bottom buttons
|
||||
bottom_layout = QHBoxLayout()
|
||||
select_all_btn = QPushButton("Select All")
|
||||
select_all_btn.clicked.connect(self.select_all)
|
||||
ok_btn = QPushButton("OK")
|
||||
ok_btn.clicked.connect(self.accept)
|
||||
cancel_btn = QPushButton("Cancel")
|
||||
cancel_btn.clicked.connect(self.reject)
|
||||
|
||||
bottom_layout.addWidget(select_all_btn)
|
||||
bottom_layout.addStretch()
|
||||
bottom_layout.addWidget(ok_btn)
|
||||
bottom_layout.addWidget(cancel_btn)
|
||||
layout.addLayout(bottom_layout)
|
||||
|
||||
# Connect signals
|
||||
self.local_list.itemChanged.connect(self.update_preview)
|
||||
self.network_list.itemChanged.connect(self.update_preview)
|
||||
self.preview_list.model().rowsMoved.connect(self.update_camera_order)
|
||||
|
||||
# Set splitter sizes
|
||||
splitter.setSizes([450, 450])
|
||||
|
||||
# Initial camera refresh
|
||||
self.refresh_cameras()
|
||||
|
||||
# Restore last selection if available
|
||||
if self.detector:
|
||||
last_selected = self.detector.config.load_setting('last_selected_cameras', [])
|
||||
if last_selected:
|
||||
self.restore_selection(last_selected)
|
||||
|
||||
def refresh_cameras(self):
|
||||
"""Refresh both local and network camera lists asynchronously"""
|
||||
self.local_list.clear()
|
||||
self.network_list.clear()
|
||||
|
||||
if not self.detector:
|
||||
return
|
||||
|
||||
# Show placeholders and disable refresh while scanning
|
||||
self.refresh_btn.setEnabled(False)
|
||||
scanning_item_local = QListWidgetItem("Scanning for cameras…")
|
||||
scanning_item_local.setFlags(Qt.NoItemFlags)
|
||||
self.local_list.addItem(scanning_item_local)
|
||||
scanning_item_net = QListWidgetItem("Loading network cameras…")
|
||||
scanning_item_net.setFlags(Qt.NoItemFlags)
|
||||
self.network_list.addItem(scanning_item_net)
|
||||
|
||||
# Start background scan
|
||||
started = self.detector.start_camera_scan(10)
|
||||
if not started:
|
||||
# If a scan is already running, we'll just wait for its signal
|
||||
pass
|
||||
|
||||
# Connect once to update lists when scan completes
|
||||
try:
|
||||
self.detector.cameras_scanned.disconnect(self._on_scan_finished_dialog)
|
||||
except Exception:
|
||||
pass
|
||||
self.detector.cameras_scanned.connect(self._on_scan_finished_dialog)
|
||||
|
||||
def _on_scan_finished_dialog(self, cams, names):
|
||||
# Re-enable refresh
|
||||
self.refresh_btn.setEnabled(True)
|
||||
# Rebuild lists
|
||||
self.local_list.clear()
|
||||
self.network_list.clear()
|
||||
|
||||
# Local cameras
|
||||
for cam_path in cams:
|
||||
if cam_path.startswith('net:'):
|
||||
continue
|
||||
if cam_path.startswith('/dev/'):
|
||||
display = os.path.basename(cam_path)
|
||||
else:
|
||||
# Numeric index
|
||||
pretty = names.get(cam_path)
|
||||
display = f"{pretty} (#{cam_path})" if pretty else f"Camera {cam_path}"
|
||||
item = QListWidgetItem(display)
|
||||
item.setData(Qt.UserRole, cam_path)
|
||||
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
|
||||
item.setCheckState(Qt.Unchecked)
|
||||
self.local_list.addItem(item)
|
||||
|
||||
# Network cameras
|
||||
for name, camera_info in self.detector.network_cameras.items():
|
||||
if isinstance(camera_info, dict):
|
||||
url = camera_info.get('url', '')
|
||||
has_auth = camera_info.get('username') is not None
|
||||
display_text = f"{name} ({url})"
|
||||
if has_auth:
|
||||
display_text += " 🔒"
|
||||
else:
|
||||
display_text = f"{name} ({camera_info})"
|
||||
item = QListWidgetItem(display_text)
|
||||
item.setData(Qt.UserRole, f"net:{name}")
|
||||
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
|
||||
item.setCheckState(Qt.Unchecked)
|
||||
self.network_list.addItem(item)
|
||||
|
||||
def restore_selection(self, last_selected):
|
||||
"""Restore previous camera selection"""
|
||||
for cam_id in last_selected:
|
||||
# Check local cameras
|
||||
for i in range(self.local_list.count()):
|
||||
item = self.local_list.item(i)
|
||||
if item.data(Qt.UserRole) == cam_id:
|
||||
item.setCheckState(Qt.Checked)
|
||||
|
||||
# Check network cameras
|
||||
for i in range(self.network_list.count()):
|
||||
item = self.network_list.item(i)
|
||||
if item.data(Qt.UserRole) == cam_id:
|
||||
item.setCheckState(Qt.Checked)
|
||||
|
||||
def update_preview(self):
    """Update the preview list with currently selected cameras"""
    self.preview_list.clear()
    self.selected_cameras = []

    # Mirror every checked item into the ordered preview list,
    # local cameras first, then network cameras.
    for prefix, source in (("Local", self.local_list),
                           ("Network", self.network_list)):
        for row in range(source.count()):
            entry = source.item(row)
            if entry.checkState() != Qt.Checked:
                continue
            cam_id = entry.data(Qt.UserRole)
            mirrored = QListWidgetItem(f"{prefix}: {entry.text()}")
            mirrored.setData(Qt.UserRole, cam_id)
            self.preview_list.addItem(mirrored)
            self.selected_cameras.append(cam_id)

    # Persist the selection so it survives restarts.
    if self.detector:
        self.detector.config.save_setting('last_selected_cameras', self.selected_cameras)
|
||||
|
||||
def update_camera_order(self):
    """Update the camera order based on preview list order"""
    # The preview list is the source of truth for ordering.
    self.selected_cameras = [
        self.preview_list.item(row).data(Qt.UserRole)
        for row in range(self.preview_list.count())
    ]

    # Persist the new ordering.
    if self.detector:
        self.detector.config.save_setting('last_selected_cameras', self.selected_cameras)
|
||||
|
||||
def select_all(self):
    """Select all cameras in both lists"""
    for widget in (self.local_list, self.network_list):
        for row in range(widget.count()):
            widget.item(row).setCheckState(Qt.Checked)
|
||||
|
||||
def clear_selection(self):
    """Clear all selections"""
    for widget in (self.local_list, self.network_list):
        for row in range(widget.count()):
            widget.item(row).setCheckState(Qt.Unchecked)
|
||||
|
||||
def remove_selected(self):
    """Remove selected items from the preview list"""
    for chosen in self.preview_list.selectedItems():
        cam_id = chosen.data(Qt.UserRole)
        # Unchecking the matching source item keeps both views in sync;
        # the preview rebuild then drops the row.
        for widget in (self.local_list, self.network_list):
            for row in range(widget.count()):
                if widget.item(row).data(Qt.UserRole) == cam_id:
                    widget.item(row).setCheckState(Qt.Unchecked)
|
||||
|
||||
# Camera connection tests removed for performance reasons per user request.
|
||||
def test_selected_cameras(self):
    """Deprecated: Camera tests are disabled to improve performance."""
    # Kept only so existing callers/shortcuts don't break; it just informs.
    QMessageBox.information(
        self,
        "Camera Tests Disabled",
        "Camera connectivity tests have been removed to speed up the application.")
    return
|
||||
|
||||
def show_network_dialog(self):
    """Show the network camera configuration dialog"""
    editor = NetworkCameraDialog(self)
    # Re-scan on accept so newly added cameras appear immediately.
    if editor.exec_() == QDialog.Accepted:
        self.refresh_cameras()
|
||||
642
mucapy/CameraThread.py
Normal file
642
mucapy/CameraThread.py
Normal file
@@ -0,0 +1,642 @@
|
||||
import time
|
||||
import urllib.parse
|
||||
from enum import Enum
|
||||
import logging
|
||||
import traceback
|
||||
from typing import Optional, Dict, Any
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import requests
|
||||
from PyQt5.QtCore import QThread, pyqtSignal, QMutex, QWaitCondition
|
||||
|
||||
try:
|
||||
import rtsp
|
||||
RTSP_LIB_AVAILABLE = True
|
||||
except ImportError:
|
||||
RTSP_LIB_AVAILABLE = False
|
||||
logging.info("rtsp library not available. Install with: pip install rtsp")
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class StreamType(Enum):
    """Kinds of video sources a camera thread knows how to open."""

    LOCAL = "local"            # local device (numeric index / /dev/videoN)
    RTSP = "rtsp"              # rtsp:// stream
    HTTP_MJPEG = "http_mjpeg"  # plain HTTP MJPEG stream
    DROIDCAM = "droidcam"      # DroidCam phone app (port 4747)
    IP_CAMERA = "ip_camera"    # generic host:port IP camera
    NETWORK = "network"        # "net:<name>" alias resolved via config
|
||||
|
||||
|
||||
class CameraThread(QThread):
    """Background worker that owns one camera connection and streams frames.

    Opens a local device, RTSP, or HTTP/MJPEG source (optionally resolved
    from a "net:<name>" alias via the parent's detector), reads frames at a
    target FPS with retry/backoff reconnection, and reports frames, errors
    and statistics to the GUI thread via Qt signals.
    """

    # Signals
    frame_ready = pyqtSignal(int, np.ndarray)
    error_occurred = pyqtSignal(int, str)
    connection_status = pyqtSignal(int, bool, str)  # camera_id, connected, message
    stats_updated = pyqtSignal(int, dict)  # camera_id, stats

    def __init__(self, camera_id, camera_info, parent=None):
        """Store the camera identity/config; no I/O happens until run().

        camera_id: integer id used in every emitted signal.
        camera_info: int index, device path/URL string, "net:<name>" alias,
                     or a dict with 'url' and optional 'username'/'password'.
        """
        super().__init__(parent)
        self.camera_id = camera_id
        self.camera_info = camera_info
        self.running = False
        self.paused = False
        self.cap = None          # cv2.VideoCapture when OpenCV path is used
        self.rtsp_client = None  # rtsp.Client when the rtsp library is used
        self.mutex = QMutex()
        self.condition = QWaitCondition()

        # Configuration with safe defaults
        self.frame_interval = 1.0 / 30  # Default to 30 FPS
        self.max_reconnect_attempts = 10
        self.reconnect_delay = 2
        self.reconnect_backoff = 1.5  # Exponential backoff factor
        self.read_timeout = 5.0
        self.connection_timeout = 10
        self.max_consecutive_failures = 15
        self.health_check_interval = 5.0

        # State tracking
        self.stream_type = None
        self.use_rtsp_lib = RTSP_LIB_AVAILABLE
        self.last_successful_frame = 0
        self.consecutive_failures = 0
        self.total_failures = 0
        self.total_frames = 0
        self.last_health_check = 0
        self.connection_attempts = 0

        # Statistics (copied into each stats_updated emission)
        self.stats = {
            'fps': 0,
            'total_frames': 0,
            'total_failures': 0,
            'connection_attempts': 0,
            'uptime': 0,
            'start_time': 0,
            'last_frame_time': 0
        }

    def set_fps(self, fps):
        """Set the target FPS for frame capture"""
        try:
            if fps > 0 and fps <= 120:  # Reasonable bounds
                self.frame_interval = 1.0 / fps
                logger.info(f"Camera {self.camera_id}: FPS set to {fps}")
            else:
                logger.warning(f"Camera {self.camera_id}: Invalid FPS value {fps}")
        except Exception as e:
            logger.error(f"Camera {self.camera_id}: Error setting FPS: {e}")

    def safe_emit(self, signal, *args):
        """Emit *signal* only while the thread is alive, swallowing errors."""
        try:
            if self.isRunning():
                signal.emit(*args)
        except Exception as e:
            logger.error(f"Camera {self.camera_id}: Signal emit failed: {e}")

    def update_stats(self):
        """Refresh the stats dict and publish a copy via stats_updated."""
        try:
            current_time = time.time()
            if self.stats['last_frame_time'] > 0:
                time_diff = current_time - self.stats['last_frame_time']
                if time_diff < 5:  # Only update FPS if we have recent frames
                    # NOTE(review): instantaneous FPS from one inter-frame gap,
                    # not a rolling average.
                    self.stats['fps'] = 1.0 / time_diff if time_diff > 0 else 0

            self.stats['total_frames'] = self.total_frames
            self.stats['total_failures'] = self.total_failures
            self.stats['connection_attempts'] = self.connection_attempts
            self.stats['uptime'] = current_time - self.stats['start_time'] if self.stats['start_time'] > 0 else 0

            self.safe_emit(self.stats_updated, self.camera_id, self.stats.copy())
        except Exception as e:
            logger.debug(f"Camera {self.camera_id}: Stats update error: {e}")

    def detect_stream_type(self, url_or_info):
        """Classify a camera source string/index into a StreamType."""
        try:
            if isinstance(url_or_info, (int, str)):
                url_str = str(url_or_info).strip().lower()

                if url_str.isdigit():
                    return StreamType.LOCAL
                elif url_str.startswith('rtsp://'):
                    return StreamType.RTSP
                elif url_str.startswith('net:'):
                    return StreamType.NETWORK
                elif ':4747' in url_str or 'droidcam' in url_str:
                    return StreamType.DROIDCAM
                elif url_str.startswith(('http://', 'https://')):
                    return StreamType.HTTP_MJPEG
                else:
                    # Try to parse as IP camera
                    if any(x in url_str for x in ['.', ':']):
                        return StreamType.IP_CAMERA
                    return StreamType.LOCAL  # Fallback
            # Non-int/str inputs (e.g. dicts) land here.
            return StreamType.NETWORK
        except Exception as e:
            logger.error(f"Camera {self.camera_id}: Stream type detection failed: {e}")
            return StreamType.IP_CAMERA  # Safe fallback

    @staticmethod
    def validate_url(url):
        """Safely validate and normalize URL format"""
        try:
            if not url or not isinstance(url, str):
                return None

            url = url.strip()
            if not url:
                return None

            # Parse the URL; bare hosts get an http:// scheme prepended.
            if not url.startswith(('http://', 'https://', 'rtsp://', 'rtmp://')):
                url = f"http://{url}"

            parsed = urllib.parse.urlparse(url)

            if not parsed.netloc:
                return None

            # Special handling for DroidCam: its MJPEG feed lives at /video.
            if ':4747' in url and not url.endswith('/video'):
                base_url = f"{parsed.scheme}://{parsed.netloc}"
                return f"{base_url}/video"

            return url

        except Exception as e:
            logger.error(f"URL validation error: {e}")
            return None

    def construct_camera_url(self, camera_info):
        """Safely construct proper camera URL with authentication if needed"""
        try:
            if isinstance(camera_info, dict):
                url = camera_info.get('url', '')
                username = camera_info.get('username', '')
                password = camera_info.get('password', '')
            else:
                url = str(camera_info)
                username = ''
                password = ''

            url = self.validate_url(url)
            if not url:
                return None

            # Handle authentication: embed percent-encoded user:pass in the
            # netloc unless credentials are already present.
            if username and password:
                parsed = urllib.parse.urlparse(url)
                if '@' not in parsed.netloc:
                    auth = f"{urllib.parse.quote(username)}:{urllib.parse.quote(password)}"
                    netloc = f"{auth}@{parsed.netloc}"
                    url = urllib.parse.urlunparse(parsed._replace(netloc=netloc))

            return url

        except Exception as e:
            logger.error(f"Camera {self.camera_id}: Error constructing camera URL: {e}")
            return None

    def safe_capture_release(self):
        """Safely release OpenCV capture"""
        try:
            if self.cap is not None:
                self.cap.release()
                self.cap = None
                logger.debug(f"Camera {self.camera_id}: Capture released")
        except Exception as e:
            logger.debug(f"Camera {self.camera_id}: Error releasing capture: {e}")
        finally:
            # Always drop the reference, even if release() raised.
            self.cap = None

    def safe_rtsp_close(self):
        """Safely close RTSP client"""
        try:
            if self.rtsp_client is not None:
                self.rtsp_client.close()
                self.rtsp_client = None
                logger.debug(f"Camera {self.camera_id}: RTSP client closed")
        except Exception as e:
            logger.debug(f"Camera {self.camera_id}: Error closing RTSP client: {e}")
        finally:
            self.rtsp_client = None

    def configure_capture(self, cap, stream_type):
        """Safely configure VideoCapture object based on stream type"""
        try:
            # Common settings: tiny buffer keeps latency low.
            cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

            if stream_type == StreamType.LOCAL:
                cap.set(cv2.CAP_PROP_FPS, 30)

            elif stream_type in [StreamType.RTSP, StreamType.IP_CAMERA]:
                # RTSP/IP camera optimizations
                cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'H264'))
                cap.set(cv2.CAP_PROP_OPEN_TIMEOUT_MSEC, 5000)
                cap.set(cv2.CAP_PROP_READ_TIMEOUT_MSEC, 5000)

            elif stream_type in [StreamType.HTTP_MJPEG, StreamType.DROIDCAM]:
                cap.set(cv2.CAP_PROP_OPEN_TIMEOUT_MSEC, 5000)
                cap.set(cv2.CAP_PROP_READ_TIMEOUT_MSEC, 5000)

            logger.debug(f"Camera {self.camera_id}: Capture configured for {stream_type.value}")
        except Exception as e:
            # Property support varies by backend; failures are non-fatal.
            logger.warning(f"Camera {self.camera_id}: Could not configure capture settings: {e}")

    def test_network_endpoint(self, url, timeout=3):
        """Safely test if a network endpoint is accessible"""
        try:
            response = requests.head(url, timeout=timeout, allow_redirects=True)
            accessible = response.status_code in [200, 401, 403]  # 401/403 means it's there but needs auth
            logger.debug(f"Camera {self.camera_id}: Network test for {url}: {accessible}")
            return accessible
        except requests.exceptions.RequestException:
            # Some servers reject HEAD; fall back to a streamed GET.
            try:
                response = requests.get(url, timeout=timeout, stream=True)
                response.close()
                accessible = response.status_code in [200, 401, 403]
                logger.debug(f"Camera {self.camera_id}: Network test (GET) for {url}: {accessible}")
                return accessible
            except Exception as e:
                logger.debug(f"Camera {self.camera_id}: Network test failed for {url}: {e}")
                return False
        except Exception as e:
            logger.debug(f"Camera {self.camera_id}: Network test error for {url}: {e}")
            return False

    def connect_rtsp_with_library(self, url):
        """Safely connect to RTSP stream using the rtsp library"""
        if not self.use_rtsp_lib:
            return False

        try:
            logger.info(f"Camera {self.camera_id}: Attempting RTSP library connection...")
            self.rtsp_client = rtsp.Client(rtsp_server_uri=url, verbose=False)

            # Test if connection works
            if self.rtsp_client.isOpened():
                # Try to read a frame with timeout
                start_time = time.time()
                while time.time() - start_time < self.read_timeout:
                    frame = self.rtsp_client.read()
                    if frame is not None:
                        logger.info(f"Camera {self.camera_id}: Successfully connected with rtsp library")
                        return True
                    time.sleep(0.1)

            logger.warning(f"Camera {self.camera_id}: Failed to connect with rtsp library")
            self.safe_rtsp_close()
            return False

        except Exception as e:
            logger.warning(f"Camera {self.camera_id}: RTSP library error: {e}")
            self.safe_rtsp_close()
            return False

    def connect_rtsp_with_opencv(self, url):
        """Safely connect to RTSP stream using OpenCV with different transport protocols"""
        import os

        transports = ['tcp', 'udp', 'http']

        for transport in transports:
            try:
                logger.info(f"Camera {self.camera_id}: Trying RTSP with {transport.upper()} transport...")

                # Set FFMPEG options (read by OpenCV when the capture opens)
                os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = (
                    f"rtsp_transport;{transport}|"
                    f"timeout;5000000|"
                    f"stimeout;5000000|"
                    f"buffer_size;1024000"
                )

                self.cap = cv2.VideoCapture(url, cv2.CAP_FFMPEG)
                self.configure_capture(self.cap, StreamType.RTSP)

                if not self.cap.isOpened():
                    logger.debug(f"Camera {self.camera_id}: Failed to open with {transport}")
                    self.safe_capture_release()
                    continue

                # Try to read a frame with timeout
                start_time = time.time()
                while time.time() - start_time < self.read_timeout:
                    ret, frame = self.cap.read()
                    if ret and frame is not None and frame.size > 0:
                        logger.info(f"Camera {self.camera_id}: Successfully connected with {transport.upper()}")
                        return True
                    time.sleep(0.1)

                logger.debug(f"Camera {self.camera_id}: Failed to read frame with {transport}")
                self.safe_capture_release()

            except Exception as e:
                logger.debug(f"Camera {self.camera_id}: Error with {transport}: {e}")
                self.safe_capture_release()

        return False

    def connect_to_camera(self):
        """Safely attempt to connect to the camera with enhanced retry logic"""
        self.connection_attempts += 1

        for attempt in range(self.max_reconnect_attempts):
            try:
                # Clean up existing connections
                self.safe_capture_release()
                self.safe_rtsp_close()

                # Determine camera source
                if isinstance(self.camera_info, str) and self.camera_info.startswith('net:'):
                    # "net:<name>" alias: resolve through the parent's detector.
                    name = self.camera_info[4:]
                    detector = self.parent().detector if self.parent() else None

                    if not detector or name not in getattr(detector, 'network_cameras', {}):
                        self.safe_emit(self.connection_status, self.camera_id, False, f"Network camera {name} not found")
                        time.sleep(self.reconnect_delay * (self.reconnect_backoff ** attempt))
                        continue

                    camera_info = detector.network_cameras[name]
                    url = self.construct_camera_url(camera_info)

                    if not url:
                        self.safe_emit(self.connection_status, self.camera_id, False, f"Invalid URL for {name}")
                        time.sleep(self.reconnect_delay * (self.reconnect_backoff ** attempt))
                        continue

                    self.stream_type = self.detect_stream_type(url)
                    camera_source = url

                else:
                    if isinstance(self.camera_info, dict):
                        url = self.construct_camera_url(self.camera_info)
                        if not url:
                            self.safe_emit(self.connection_status, self.camera_id, False, "Invalid camera URL")
                            time.sleep(self.reconnect_delay * (self.reconnect_backoff ** attempt))
                            continue
                        camera_source = url
                        self.stream_type = self.detect_stream_type(url)
                    else:
                        camera_source = self.camera_info
                        self.stream_type = self.detect_stream_type(camera_source)

                        if self.stream_type != StreamType.LOCAL:
                            camera_source = self.validate_url(str(camera_source))
                            if not camera_source:
                                self.safe_emit(self.connection_status, self.camera_id, False, "Invalid camera source")
                                time.sleep(self.reconnect_delay * (self.reconnect_backoff ** attempt))
                                continue

                logger.info(f"Camera {self.camera_id}: Attempt {attempt + 1}/{self.max_reconnect_attempts} connecting to {self.stream_type.value}...")

                # Test network endpoint for HTTP streams
                if self.stream_type in [StreamType.HTTP_MJPEG, StreamType.DROIDCAM, StreamType.IP_CAMERA]:
                    if not self.test_network_endpoint(camera_source):
                        logger.warning(f"Camera {self.camera_id}: Network endpoint not accessible")
                        time.sleep(self.reconnect_delay * (self.reconnect_backoff ** attempt))
                        continue

                # Connect based on stream type
                success = False

                if self.stream_type == StreamType.LOCAL:
                    try:
                        self.cap = cv2.VideoCapture(int(camera_source))
                        self.configure_capture(self.cap, self.stream_type)

                        if self.cap.isOpened():
                            # Test frame reading
                            ret, frame = self.cap.read()
                            if ret and frame is not None:
                                success = True
                    except Exception as e:
                        logger.warning(f"Camera {self.camera_id}: Local camera error: {e}")

                elif self.stream_type == StreamType.RTSP:
                    # Try rtsp library first if available
                    if self.use_rtsp_lib and self.connect_rtsp_with_library(camera_source):
                        success = True
                    elif self.connect_rtsp_with_opencv(camera_source):
                        success = True

                else:
                    # HTTP MJPEG, DroidCam, IP Camera
                    try:
                        self.cap = cv2.VideoCapture(camera_source, cv2.CAP_FFMPEG)
                        self.configure_capture(self.cap, self.stream_type)

                        if self.cap.isOpened():
                            # Test frame reading with timeout
                            start_time = time.time()
                            ret, frame = False, None
                            while time.time() - start_time < self.read_timeout:
                                ret, frame = self.cap.read()
                                if ret and frame is not None and frame.size > 0:
                                    success = True
                                    break
                                time.sleep(0.1)
                    except Exception as e:
                        logger.warning(f"Camera {self.camera_id}: Network camera error: {e}")

                if success:
                    logger.info(f"Camera {self.camera_id}: Successfully connected")
                    self.safe_emit(self.connection_status, self.camera_id, True, "Connected")
                    self.consecutive_failures = 0
                    return True
                else:
                    logger.warning(f"Camera {self.camera_id}: Connection attempt {attempt + 1} failed")
                    self.safe_capture_release()
                    self.safe_rtsp_close()

                    if attempt < self.max_reconnect_attempts - 1:
                        delay = self.reconnect_delay * (self.reconnect_backoff ** attempt)
                        logger.info(f"Camera {self.camera_id}: Retrying in {delay:.1f}s...")
                        time.sleep(delay)

            except Exception as e:
                logger.error(f"Camera {self.camera_id}: Connection attempt {attempt + 1} error: {e}")
                self.safe_capture_release()
                self.safe_rtsp_close()

                if attempt < self.max_reconnect_attempts - 1:
                    time.sleep(self.reconnect_delay * (self.reconnect_backoff ** attempt))

        logger.error(f"Camera {self.camera_id}: All connection attempts failed")
        self.safe_emit(self.connection_status, self.camera_id, False, "Connection failed")
        self.safe_emit(self.error_occurred, self.camera_id, "Failed to connect after multiple attempts")
        return False

    def run(self):
        """Thread main loop: connect, then read frames until stopped.

        Emits frame_ready for each valid frame; reconnects after repeated
        failures; always cleans up on exit.
        """
        self.stats['start_time'] = time.time()

        try:
            logger.info(f"Camera {self.camera_id}: Thread starting")

            if not self.connect_to_camera():
                logger.error(f"Camera {self.camera_id}: Initial connection failed")
                return

            self.running = True
            last_frame_time = 0
            self.last_health_check = time.time()

            while self.running:
                try:
                    # Check if paused
                    if self.paused:
                        time.sleep(0.1)
                        continue

                    # Frame rate limiting
                    current_time = time.time()
                    if current_time - last_frame_time < self.frame_interval:
                        time.sleep(0.001)
                        continue

                    # Health check: proactively reconnect when failures pile up.
                    if current_time - self.last_health_check > self.health_check_interval:
                        if self.consecutive_failures > self.max_consecutive_failures / 2:
                            logger.warning(f"Camera {self.camera_id}: Health check failed, reconnecting...")
                            if not self.connect_to_camera():
                                break
                        self.last_health_check = current_time

                    # Read frame based on connection type
                    frame = None
                    ret = False

                    try:
                        if self.rtsp_client and self.rtsp_client.isOpened():
                            frame = self.rtsp_client.read()
                            ret = frame is not None
                            if ret:
                                # Convert PIL Image to numpy array if needed
                                if hasattr(frame, 'size'):  # Likely PIL Image
                                    frame = np.array(frame)
                                    if len(frame.shape) == 3 and frame.shape[2] == 3:
                                        # PIL delivers RGB; the pipeline expects BGR.
                                        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                        elif self.cap and self.cap.isOpened():
                            ret, frame = self.cap.read()
                        else:
                            ret = False
                    except Exception as e:
                        logger.debug(f"Camera {self.camera_id}: Frame read error: {e}")
                        ret = False

                    if ret and frame is not None and frame.size > 0:
                        # Validate frame: non-empty 2-D (gray) or 3-D (color) array.
                        if (isinstance(frame, np.ndarray) and
                                len(frame.shape) in [2, 3] and
                                frame.shape[0] > 0 and frame.shape[1] > 0):

                            self.consecutive_failures = 0
                            self.total_frames += 1
                            self.stats['last_frame_time'] = current_time
                            last_frame_time = current_time

                            self.safe_emit(self.frame_ready, self.camera_id, frame)
                            self.update_stats()
                        else:
                            self.handle_frame_failure()
                    else:
                        self.handle_frame_failure()

                    # Brief sleep to prevent CPU overload
                    time.sleep(0.001)

                except Exception as e:
                    logger.error(f"Camera {self.camera_id}: Main loop error: {e}")
                    self.handle_frame_failure()
                    time.sleep(0.1)  # Longer sleep on error

        except Exception as e:
            logger.critical(f"Camera {self.camera_id}: Critical thread error: {e}")
            self.safe_emit(self.error_occurred, self.camera_id, f"Thread crash: {str(e)}")
        finally:
            logger.info(f"Camera {self.camera_id}: Thread stopping")
            self.cleanup()

    def handle_frame_failure(self):
        """Handle frame reading failures with reconnection logic"""
        self.consecutive_failures += 1
        self.total_failures += 1

        if self.consecutive_failures >= self.max_consecutive_failures:
            logger.warning(f"Camera {self.camera_id}: Too many failures, attempting reconnection...")
            self.safe_emit(self.connection_status, self.camera_id, False, "Reconnecting...")

            if not self.connect_to_camera():
                logger.error(f"Camera {self.camera_id}: Reconnection failed, stopping thread")
                self.running = False
            else:
                self.consecutive_failures = 0

    def stop(self):
        """Safely stop the thread"""
        logger.info(f"Camera {self.camera_id}: Stopping thread...")

        # Flip the run flag under the mutex so the loop sees it promptly.
        self.mutex.lock()
        self.running = False
        self.mutex.unlock()

        # Wake up thread if it's waiting
        self.condition.wakeAll()

        if not self.wait(3000):  # 3 second timeout
            logger.warning(f"Camera {self.camera_id}: Thread did not stop gracefully, terminating...")
            try:
                self.terminate()
                if not self.wait(1000):
                    logger.error(f"Camera {self.camera_id}: Thread termination failed")
            except Exception as e:
                logger.error(f"Camera {self.camera_id}: Error during termination: {e}")
        else:
            logger.info(f"Camera {self.camera_id}: Thread stopped gracefully")

    def pause(self):
        """Pause frame capture"""
        self.paused = True
        logger.info(f"Camera {self.camera_id}: Paused")

    def resume(self):
        """Resume frame capture"""
        self.paused = False
        logger.info(f"Camera {self.camera_id}: Resumed")

    def cleanup(self):
        """Comprehensive cleanup of all resources"""
        logger.info(f"Camera {self.camera_id}: Cleaning up resources...")

        try:
            self.running = False
            self.safe_capture_release()
            self.safe_rtsp_close()

            self.safe_emit(self.connection_status, self.camera_id, False, "Disconnected")
            self.update_stats()

            logger.info(f"Camera {self.camera_id}: Cleanup completed")
        except Exception as e:
            logger.error(f"Camera {self.camera_id}: Cleanup error: {e}")

    def get_status(self) -> Dict[str, Any]:
        """Get current camera status"""
        return {
            'running': self.running,
            'paused': self.paused,
            'connected': (self.cap is not None and self.cap.isOpened()) or
                         (self.rtsp_client is not None and self.rtsp_client.isOpened()),
            'stream_type': self.stream_type.value if self.stream_type else 'unknown',
            'consecutive_failures': self.consecutive_failures,
            'total_frames': self.total_frames,
            'total_failures': self.total_failures,
            'stats': self.stats.copy()
        }
|
||||
85
mucapy/CollpsibleDock.py
Normal file
85
mucapy/CollpsibleDock.py
Normal file
@@ -0,0 +1,85 @@
|
||||
from PyQt5.QtCore import Qt, QTimer, QDir, QSize, QDateTime, QRect, QThread, pyqtSignal, QMutex, QObject, QEvent
|
||||
from PyQt5.QtGui import (QImage, QPixmap, QIcon, QColor, QKeySequence, QPainter,
|
||||
QPen, QBrush)
|
||||
from PyQt5.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QHBoxLayout,
|
||||
QWidget, QLabel, QPushButton, QComboBox, QSpinBox,
|
||||
QFileDialog, QMessageBox, QMenu, QAction, QActionGroup, QGridLayout, QGroupBox,
|
||||
QDockWidget, QScrollArea, QToolButton, QDialog,
|
||||
QShortcut, QListWidget, QFormLayout, QLineEdit,
|
||||
QCheckBox, QTabWidget, QListWidgetItem, QSplitter,
|
||||
QProgressBar, QSizePolicy)
|
||||
|
||||
class CollapsibleDock(QDockWidget):
    """Custom dock widget with collapse/expand functionality"""

    def __init__(self, title, parent=None):
        """Build the dock with a custom title bar hosting a collapse toggle.

        title: window title shown by Qt; parent: optional owner widget.
        """
        super().__init__(title, parent)
        self.setFeatures(QDockWidget.DockWidgetClosable |
                         QDockWidget.DockWidgetMovable |
                         QDockWidget.DockWidgetFloatable)
        # Allow docking only on sides to avoid central area clipping
        self.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
        # Prefer keeping a minimum width but allow vertical expansion
        self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Expanding)
        # Ensure the dock paints its own background (prevents visual bleed/clip)
        self.setAttribute(Qt.WA_StyledBackground, True)

        # Create a widget for the title bar that contains both toggle button and close button
        title_widget = QWidget()
        title_layout = QHBoxLayout(title_widget)
        title_layout.setContentsMargins(0, 0, 0, 0)
        title_layout.setSpacing(0)
        # Ensure title bar doesn't force tiny width
        title_widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)

        self.toggle_button = QToolButton()
        self.toggle_button.setIcon(QIcon.fromTheme("arrow-left"))
        self.toggle_button.setIconSize(QSize(16, 16))
        self.toggle_button.setStyleSheet("border: none;")
        self.toggle_button.clicked.connect(self.toggle_collapse)

        title_layout.addWidget(self.toggle_button)
        title_layout.addStretch()

        self.setTitleBarWidget(title_widget)
        # Collapse state plus saved geometry so expand() can restore it.
        self.collapsed = False
        self.original_size = None
        self.original_minimum_width = None
        self.original_maximum_width = None

    def toggle_collapse(self):
        """Toggle between collapsed and expanded states"""
        if self.collapsed:
            self.expand()
        else:
            self.collapse()

    def collapse(self):
        """Collapse the dock widget (fully hide)."""
        if not self.collapsed:
            # Remember geometry/constraints so expand() can restore them.
            self.original_size = self.size()
            self.original_minimum_width = self.minimumWidth()
            self.original_maximum_width = self.maximumWidth()
            # Fully hide the dock to avoid any clipping/overlap with camera panes
            self.setVisible(False)
            self.toggle_button.setIcon(QIcon.fromTheme("arrow-right"))
            self.collapsed = True

    def expand(self):
        """Expand (show) the dock widget"""
        if self.collapsed:
            # Restore previous constraints, falling back to sensible defaults
            minw = self.original_minimum_width if self.original_minimum_width is not None else 250
            self.setMinimumWidth(minw)
            self.setMaximumWidth(self.original_maximum_width if self.original_maximum_width is not None else 16777215)
            # Show and restore size
            self.setVisible(True)
            if self.original_size:
                self.resize(self.original_size)
            else:
                self.resize(max(minw, 250), self.height())
            # Make sure the dock is on top of central widgets
            self.raise_()
            self.toggle_button.setIcon(QIcon.fromTheme("arrow-left"))
            self.collapsed = False
|
||||
|
||||
61
mucapy/Config.py
Normal file
61
mucapy/Config.py
Normal file
@@ -0,0 +1,61 @@
|
||||
import os
|
||||
import json
|
||||
import sys
|
||||
class Config:
    """Persisted application settings backed by a per-user JSON file.

    Settings live in a platform-appropriate config directory
    (%APPDATA%\\MuCaPy on Windows, ~/.config/mucapy elsewhere) and are
    written back to disk on every save_setting() call.
    """

    def __init__(self):
        # Use platform-specific user directory for config
        if sys.platform.startswith('win'):
            config_dir = os.path.join(os.environ.get('APPDATA', os.path.expanduser('~')), 'MuCaPy')
            pictures_dir = os.path.join(os.environ.get('USERPROFILE', os.path.expanduser('~')), 'Pictures', 'MuCaPy')
        else:
            config_dir = os.path.join(os.path.expanduser('~'), '.config', 'mucapy')
            pictures_dir = os.path.join(os.path.expanduser('~'), 'Pictures', 'MuCaPy')

        # Create config directory if it doesn't exist
        os.makedirs(config_dir, exist_ok=True)

        self.config_file = os.path.join(config_dir, 'config.json')
        # Defaults; load_config() overlays whatever is on disk.
        self.settings = {
            'network_cameras': {},  # Store network cameras configuration
            'last_model_dir': '',
            'last_screenshot_dir': pictures_dir,
            'last_layout': 0,
            'last_fps': 10,
            'last_selected_cameras': [],
            'window_geometry': None,
            'confidence_threshold': 0.35,
        }
        self.load_config()

    def load_config(self):
        """Load configuration from the JSON file, keeping defaults for new keys."""
        try:
            if os.path.exists(self.config_file):
                with open(self.config_file, 'r') as f:
                    loaded_settings = json.load(f)
                # Update settings while preserving default values for new keys
                self.settings.update(loaded_settings)
        except Exception as e:
            print(f"Error loading config: {e}")

    def save_config(self):
        """Save configuration to the JSON file, reporting (not raising) errors."""
        try:
            # Ensure the file's directory exists
            os.makedirs(os.path.dirname(self.config_file), exist_ok=True)
            # Fix: the previous nested `except FileNotFoundError: pass`
            # silently swallowed write failures; let the handler below report.
            with open(self.config_file, 'w') as f:
                json.dump(self.settings, f, indent=4)
        except Exception as e:
            print(f"Error saving config: {e}")

    def save_setting(self, key, value):
        """Set *key* to *value* and persist the whole config immediately."""
        self.settings[key] = value
        self.save_config()

    def load_setting(self, key, default=None):
        """Return the stored value for *key*, or *default* if unset."""
        return self.settings.get(key, default)
|
||||
143
mucapy/NetworkCameraDialog.py
Normal file
143
mucapy/NetworkCameraDialog.py
Normal file
@@ -0,0 +1,143 @@
|
||||
from PyQt5.QtCore import Qt, QTimer, QDir, QSize, QDateTime, QRect, QThread, pyqtSignal, QMutex, QObject, QEvent
|
||||
from PyQt5.QtGui import (QImage, QPixmap, QIcon, QColor, QKeySequence, QPainter,
|
||||
QPen, QBrush)
|
||||
from PyQt5.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QHBoxLayout,
|
||||
QWidget, QLabel, QPushButton, QComboBox, QSpinBox,
|
||||
QFileDialog, QMessageBox, QMenu, QAction, QActionGroup, QGridLayout, QGroupBox,
|
||||
QDockWidget, QScrollArea, QToolButton, QDialog,
|
||||
QShortcut, QListWidget, QFormLayout, QLineEdit,
|
||||
QCheckBox, QTabWidget, QListWidgetItem, QSplitter,
|
||||
QProgressBar, QSizePolicy)
|
||||
|
||||
from todopackage.todo import todo
|
||||
|
||||
class NetworkCameraDialog(QDialog):
    """Modal dialog for managing network (IP / DroidCam) camera sources.

    Lists the cameras already configured on ``parent.detector`` and lets
    the user add or remove entries. The detector, when present, is expected
    to expose ``network_cameras`` (dict), ``add_network_camera(name, url)``
    and ``remove_network_camera(name)``.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle("Network Camera Settings")
        self.setModal(True)
        self.resize(500, 400)

        layout = QVBoxLayout(self)

        # Instructions label (text supplied by the todo helper module).
        instructions = QLabel(todo.get_instructions_CaSeDi_QLabel())

        instructions.setWordWrap(True)
        layout.addWidget(instructions)

        # Camera list
        self.camera_list = QListWidget()
        layout.addWidget(self.camera_list)

        # Input fields
        form_layout = QFormLayout()

        # Name and URL
        self.name_edit = QLineEdit()
        self.url_edit = QLineEdit()
        form_layout.addRow("Name:", self.name_edit)
        form_layout.addRow("URL:", self.url_edit)

        # Authentication group
        auth_group = QGroupBox("Authentication")
        auth_layout = QVBoxLayout()

        self.auth_checkbox = QCheckBox("Enable Authentication")
        self.auth_checkbox.stateChanged.connect(self.toggle_auth_fields)
        auth_layout.addWidget(self.auth_checkbox)

        auth_form = QFormLayout()
        self.username_edit = QLineEdit()
        self.password_edit = QLineEdit()
        # Mask the password as it is typed.
        self.password_edit.setEchoMode(QLineEdit.Password)
        auth_form.addRow("Username:", self.username_edit)
        auth_form.addRow("Password:", self.password_edit)
        auth_layout.addLayout(auth_form)

        auth_group.setLayout(auth_layout)
        form_layout.addRow(auth_group)

        layout.addLayout(form_layout)

        # Initially disable auth fields (the checkbox starts unchecked).
        self.username_edit.setEnabled(False)
        self.password_edit.setEnabled(False)

        # Buttons
        btn_layout = QHBoxLayout()
        add_btn = QPushButton("Add Camera")
        add_btn.clicked.connect(self.add_camera)
        remove_btn = QPushButton("Remove Camera")
        remove_btn.clicked.connect(self.remove_camera)
        close_btn = QPushButton("Close")
        close_btn.clicked.connect(self.accept)

        btn_layout.addWidget(add_btn)
        btn_layout.addWidget(remove_btn)
        btn_layout.addWidget(close_btn)
        layout.addLayout(btn_layout)

        # The detector owns the camera configuration; None when the dialog
        # is created without a parent.
        self.detector = parent.detector if parent else None
        self.load_cameras()

    def toggle_auth_fields(self, state):
        """Enable/disable authentication fields based on checkbox state"""
        enabled = state == Qt.Checked
        self.username_edit.setEnabled(enabled)
        self.password_edit.setEnabled(enabled)
        if not enabled:
            # Clear stale credentials when authentication is switched off.
            self.username_edit.clear()
            self.password_edit.clear()

    def load_cameras(self):
        """Load saved network cameras into the list"""
        if not self.detector:
            return

        self.camera_list.clear()
        for name, camera_info in self.detector.network_cameras.items():
            if isinstance(camera_info, dict):
                url = camera_info.get('url', '')
                has_auth = camera_info.get('username') is not None
                display_text = f"{name} ({url})"
                if has_auth:
                    display_text += " [Auth]"
            else:
                # Handle old format where camera_info was just the URL
                display_text = f"{name} ({camera_info})"
            self.camera_list.addItem(display_text)

    def add_camera(self):
        """Add a new network camera"""
        name = self.name_edit.text().strip()
        url = self.url_edit.text().strip()

        if not name or not url:
            QMessageBox.warning(self, "Error", "Please enter both name and URL")
            return

        # Ensure URL has proper format for DroidCam (port 4747): append the
        # /video path and an http:// scheme when missing.
        # NOTE(review): nesting reconstructed from a mangled source — confirm
        # the scheme check is intended only for DroidCam-style URLs and not
        # for every entered URL.
        if ':4747' in url:
            if not url.endswith('/video'):
                url = url.rstrip('/') + '/video'
            if not url.startswith('http://') and not url.startswith('https://'):
                url = 'http://' + url

        if self.detector:
            print(f"Adding network camera: {name} with URL: {url}")  # Debug print
            self.detector.add_network_camera(name, url)
            self.load_cameras()
            self.name_edit.clear()
            self.url_edit.clear()

    def remove_camera(self):
        """Remove selected network camera"""
        current = self.camera_list.currentItem()
        if not current:
            return

        # Display text is "name (url)" with an optional " [Auth]" suffix;
        # the camera name is everything before the first " (".
        name = current.text().split(" (")[0]
        if self.detector:
            self.detector.remove_network_camera(name)
            self.load_cameras()
|
||||
573
mucapy/PopoutWindow.py
Normal file
573
mucapy/PopoutWindow.py
Normal file
@@ -0,0 +1,573 @@
|
||||
from PyQt5.QtCore import Qt, QTimer, QDateTime, QRect, QEvent, QPointF, QPoint, QThread, pyqtSignal
|
||||
from PyQt5.QtGui import (QImage, QPixmap, QColor, QKeySequence, QPainter,
|
||||
QPen, QBrush, QFont)
|
||||
from PyQt5.QtWidgets import (QMainWindow, QVBoxLayout, QHBoxLayout,
|
||||
QWidget, QLabel, QScrollArea, QToolButton,
|
||||
QShortcut, QFileDialog, QMessageBox)
|
||||
import math
|
||||
import os
|
||||
|
||||
class SaveWorker(QThread):
    """Worker thread for saving snapshots and recordings without blocking the UI.

    Signals:
        finished(bool, str): success flag and a human-readable message.
        progress(int, int): (frames saved so far, total frames) while a
            recording is being written.
    """
    finished = pyqtSignal(bool, str)
    progress = pyqtSignal(int, int)

    def __init__(self, frames, folder, cam_id, is_recording=False):
        """Store what to save and where.

        Args:
            frames: list of pixmaps/images (single-element list for a snapshot).
            folder: destination directory.
            cam_id: camera identifier used in the generated file names.
            is_recording: True to save every frame as a numbered sequence,
                False to save frames[0] as one snapshot.
        """
        super().__init__()
        self.frames = frames
        self.folder = folder
        self.cam_id = cam_id
        self.is_recording = is_recording

    def run(self):
        try:
            timestamp = QDateTime.currentDateTime().toString('yyyyMMdd_hhmmss')

            if self.is_recording:
                # Write each recorded frame as a numbered PNG, reporting progress.
                for i, frame in enumerate(self.frames):
                    filename = os.path.join(self.folder, f"cam_{self.cam_id}_rec_{timestamp}_frame_{i:04d}.png")
                    frame.save(filename)
                    self.progress.emit(i + 1, len(self.frames))
                self.finished.emit(True, f"Saved {len(self.frames)} frames")
            else:
                filename = os.path.join(self.folder, f"camera_{self.cam_id}_snapshot_{timestamp}.png")
                self.frames[0].save(filename)
                # Fix: report the actual destination path; the message
                # previously contained a literal placeholder instead of
                # the saved file name.
                self.finished.emit(True, f"Saved to: {filename}")

        except Exception as e:
            # Surface any I/O error to the UI thread via the signal.
            self.finished.emit(False, str(e))
|
||||
|
||||
|
||||
class PopoutWindow(QMainWindow):
    """Enhanced popout window with touch support, pinch zoom, and security guard features.

    Mirrors the pixmap of *source_display* on a ~25 fps timer and layers
    optional overlays (timestamp, rule-of-thirds grid, crosshair, recording
    badge) on top. Supports mouse-wheel and pinch zoom, mouse-drag and
    one-finger panning, snapshots, and a rolling in-memory frame recorder.

    Fix over the previous version: ``cam_id`` was tested for truthiness in
    the status label, ``update_status`` and the timestamp overlay, so camera
    index 0 was displayed as "View"; all checks now use ``is not None``,
    matching the window-title logic.
    """

    def __init__(self, source_display: QLabel, cam_id=None, parent=None):
        """Create a popout viewer mirroring *source_display*.

        Args:
            source_display: QLabel whose pixmap is mirrored (typically a
                camera tile in the main window).
            cam_id: camera identifier used in titles and file names; may
                legitimately be 0.
            parent: optional parent widget.
        """
        super().__init__(parent)
        self.setWindowTitle(f"Camera {cam_id}" if cam_id is not None else "Camera")
        self.source_display = source_display
        self.cam_id = cam_id
        self.zoom_factor = 1.0
        self.min_zoom = 0.2
        self.max_zoom = 10.0
        self.paused = False
        self.show_grid = False
        self.show_timestamp = True
        self.show_crosshair = False
        # 0=Off, 1=Sharpen, 2=Edges, 3=Denoise.
        # NOTE: currently display-only — refresh_frame applies no filter.
        self.enhance_mode = 0
        self.recording = False
        self.record_frames = []
        self.setMinimumSize(640, 480)

        # Touch gesture state
        self.setAttribute(Qt.WA_AcceptTouchEvents, True)
        self.gesture_type = None  # 'pinch', 'pan', or None

        # Pinch zoom state
        self.pinch_initial_distance = 0
        self.pinch_initial_zoom = 1.0

        # Pan state (both touch and mouse)
        self.pan_active = False
        self.pan_last_pos = None

        # Worker thread for saving
        self.save_worker = None

        # Snapshot history
        self.snapshot_count = 0

        # Central area: toolbar + scrollable image label
        central = QWidget()
        vbox = QVBoxLayout(central)
        vbox.setContentsMargins(4, 4, 4, 4)
        vbox.setSpacing(4)

        # Main toolbar
        toolbar = QHBoxLayout()

        # Zoom controls (44px minimum for comfortable touch targets)
        self.btn_zoom_in = QToolButton()
        self.btn_zoom_in.setText("+")
        self.btn_zoom_in.setMinimumSize(44, 44)

        self.btn_zoom_out = QToolButton()
        self.btn_zoom_out.setText("-")
        self.btn_zoom_out.setMinimumSize(44, 44)

        self.btn_zoom_reset = QToolButton()
        self.btn_zoom_reset.setText("100%")
        self.btn_zoom_reset.setMinimumSize(44, 44)

        # Playback controls
        self.btn_pause = QToolButton()
        self.btn_pause.setText("Pause")
        self.btn_pause.setMinimumSize(60, 44)

        self.btn_snapshot = QToolButton()
        self.btn_snapshot.setText("Snapshot")
        self.btn_snapshot.setMinimumSize(60, 44)

        # Overlay controls
        self.btn_grid = QToolButton()
        self.btn_grid.setText("Grid")
        self.btn_grid.setMinimumSize(60, 44)

        self.btn_time = QToolButton()
        self.btn_time.setText("Time")
        self.btn_time.setMinimumSize(60, 44)

        self.btn_crosshair = QToolButton()
        self.btn_crosshair.setText("Crosshair")
        self.btn_crosshair.setMinimumSize(60, 44)

        self.btn_enhance = QToolButton()
        self.btn_enhance.setText("Enhance")
        self.btn_enhance.setMinimumSize(60, 44)

        self.btn_record = QToolButton()
        self.btn_record.setText("Record")
        self.btn_record.setMinimumSize(60, 44)

        self.btn_full = QToolButton()
        self.btn_full.setText("Fullscreen")
        self.btn_full.setMinimumSize(60, 44)

        for b in [self.btn_zoom_out, self.btn_zoom_in, self.btn_zoom_reset,
                  self.btn_pause, self.btn_snapshot, self.btn_grid,
                  self.btn_time, self.btn_crosshair, self.btn_enhance,
                  self.btn_record, self.btn_full]:
            toolbar.addWidget(b)
        toolbar.addStretch(1)
        vbox.addLayout(toolbar)

        # Status bar
        status_layout = QHBoxLayout()
        # "is not None" so that camera id 0 is still displayed.
        self.status_label = QLabel(f"Camera {cam_id if cam_id is not None else 'View'} | Zoom: 100%")
        self.status_label.setStyleSheet("color: #666; font-size: 10px;")
        status_layout.addWidget(self.status_label)
        status_layout.addStretch(1)
        vbox.addLayout(status_layout)

        # Scroll area for panning
        self.image_label = QLabel()
        self.image_label.setAlignment(Qt.AlignCenter)
        self.image_label.setAttribute(Qt.WA_AcceptTouchEvents, True)

        self.scroll = QScrollArea()
        self.scroll.setWidget(self.image_label)
        self.scroll.setWidgetResizable(True)
        self.scroll.setAttribute(Qt.WA_AcceptTouchEvents, True)
        vbox.addWidget(self.scroll, 1)

        self.setCentralWidget(central)

        # Keyboard shortcuts
        QShortcut(QKeySequence("+"), self, activated=self.zoom_in)
        QShortcut(QKeySequence("-"), self, activated=self.zoom_out)
        QShortcut(QKeySequence("0"), self, activated=self.reset_zoom)
        QShortcut(QKeySequence(Qt.Key_Escape), self, activated=self.close)
        QShortcut(QKeySequence("F"), self, activated=self.toggle_fullscreen)
        QShortcut(QKeySequence("Ctrl+S"), self, activated=self.take_snapshot)
        QShortcut(QKeySequence("Space"), self, activated=self.toggle_pause)
        QShortcut(QKeySequence("G"), self, activated=self.toggle_grid)
        QShortcut(QKeySequence("T"), self, activated=self.toggle_timestamp)
        QShortcut(QKeySequence("C"), self, activated=self.toggle_crosshair)

        # Connect buttons
        self.btn_zoom_in.clicked.connect(self.zoom_in)
        self.btn_zoom_out.clicked.connect(self.zoom_out)
        self.btn_zoom_reset.clicked.connect(self.reset_zoom)
        self.btn_pause.clicked.connect(self.toggle_pause)
        self.btn_snapshot.clicked.connect(self.take_snapshot)
        self.btn_grid.clicked.connect(self.toggle_grid)
        self.btn_time.clicked.connect(self.toggle_timestamp)
        self.btn_crosshair.clicked.connect(self.toggle_crosshair)
        self.btn_enhance.clicked.connect(self.cycle_enhance)
        self.btn_record.clicked.connect(self.toggle_recording)
        self.btn_full.clicked.connect(self.toggle_fullscreen)

        # Timer to refresh from the source display (~25 fps)
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.refresh_frame)
        self.timer.start(40)

        # Event filter: wheel-zoom and mouse-pan on the image and viewport
        self.image_label.installEventFilter(self)
        self.scroll.viewport().installEventFilter(self)

        # Initial render
        self.refresh_frame()

    def closeEvent(self, event):
        """Stop the refresh timer and wait for a pending save before closing."""
        if hasattr(self, 'timer') and self.timer:
            self.timer.stop()
        if self.save_worker and self.save_worker.isRunning():
            self.save_worker.wait()
        return super().closeEvent(event)

    def toggle_fullscreen(self):
        """Switch between fullscreen and windowed mode."""
        if self.isFullScreen():
            self.showNormal()
            self.btn_full.setText("Fullscreen")
        else:
            self.showFullScreen()
            self.btn_full.setText("Windowed")

    def toggle_pause(self):
        """Freeze/unfreeze the mirrored frame."""
        self.paused = not self.paused
        self.btn_pause.setText("Resume" if self.paused else "Pause")
        self.update_status()

    def toggle_grid(self):
        """Toggle the rule-of-thirds grid overlay."""
        self.show_grid = not self.show_grid
        self.btn_grid.setStyleSheet("background-color: #4CAF50;" if self.show_grid else "")

    def toggle_timestamp(self):
        """Toggle the timestamp overlay."""
        self.show_timestamp = not self.show_timestamp
        self.btn_time.setStyleSheet("background-color: #4CAF50;" if self.show_timestamp else "")

    def toggle_crosshair(self):
        """Toggle the center crosshair overlay."""
        self.show_crosshair = not self.show_crosshair
        self.btn_crosshair.setStyleSheet("background-color: #4CAF50;" if self.show_crosshair else "")

    def cycle_enhance(self):
        """Cycle the enhance mode label (Off -> Sharpen -> Edges -> Denoise).

        NOTE: this only changes the button label and status text; no image
        filter is applied in refresh_frame yet.
        """
        self.enhance_mode = (self.enhance_mode + 1) % 4
        enhance_names = ["Off", "Sharpen", "Edges", "Denoise"]
        self.btn_enhance.setText(f"Enhance: {enhance_names[self.enhance_mode]}")
        if self.enhance_mode == 0:
            self.btn_enhance.setStyleSheet("")
        else:
            self.btn_enhance.setStyleSheet("background-color: #2196F3;")
        self.update_status()

    def toggle_recording(self):
        """Start/stop the rolling frame recorder; offers to save on stop."""
        self.recording = not self.recording
        if self.recording:
            self.record_frames = []
            self.btn_record.setText("Stop Rec")
            self.btn_record.setStyleSheet("background-color: #f44336;")
        else:
            self.btn_record.setText("Record")
            self.btn_record.setStyleSheet("")
            if self.record_frames:
                self.save_recording()
        self.update_status()

    def save_recording(self):
        """Ask the user where to store the recorded frames and save them asynchronously."""
        if not self.record_frames:
            return

        try:
            reply = QMessageBox.question(
                self,
                "Save Recording",
                f"Save {len(self.record_frames)} recorded frames as images?",
                QMessageBox.Yes | QMessageBox.No
            )

            if reply == QMessageBox.Yes:
                folder = QFileDialog.getExistingDirectory(self, "Select Folder for Recording")
                if folder:
                    # Hand a copy to the worker so the buffer can be cleared below.
                    self.save_worker = SaveWorker(self.record_frames.copy(), folder, self.cam_id, True)
                    self.save_worker.finished.connect(self.on_save_finished)
                    self.save_worker.progress.connect(self.on_save_progress)
                    self.save_worker.start()
                    self.status_label.setText("Saving recording...")
        except Exception as e:
            print(f"Error saving recording: {e}")

        self.record_frames = []

    def on_save_progress(self, current, total):
        """Show save progress from the worker thread in the status bar."""
        self.status_label.setText(f"Saving: {current}/{total} frames")

    def on_save_finished(self, success, message):
        """Report the worker result and restore the normal status line."""
        if success:
            QMessageBox.information(self, "Recording Saved", message)
        else:
            QMessageBox.warning(self, "Save Error", f"Error saving: {message}")
        self.update_status()

    def take_snapshot(self):
        """Save the current frame, delegating to the source display when it can."""
        if hasattr(self.source_display, 'take_screenshot'):
            self.source_display.take_screenshot()
            return

        pm = self.current_pixmap()
        if pm and not pm.isNull():
            try:
                timestamp = QDateTime.currentDateTime().toString('yyyyMMdd_hhmmss')
                filename = f"camera_{self.cam_id}_snapshot_{timestamp}.png"
                file_path, _ = QFileDialog.getSaveFileName(self, "Save Snapshot", filename, "Images (*.png *.jpg)")
                if file_path:
                    pm.save(file_path)
                    # Fix: count only snapshots that were actually saved
                    # (previously incremented even when the dialog was cancelled).
                    self.snapshot_count += 1
                    QMessageBox.information(self, "Snapshot Saved", f"Saved to: {file_path}")
            except Exception as e:
                print(f"Error saving snapshot: {e}")

    def current_pixmap(self):
        """Return the source display's current pixmap (may be None)."""
        return self.source_display.pixmap()

    def refresh_frame(self):
        """Pull the latest frame from the source, draw overlays, apply zoom, and show it."""
        if self.paused:
            return

        pm = self.current_pixmap()
        if not pm or pm.isNull():
            return

        try:
            # Store frame for recording (rolling buffer of 300 frames).
            if self.recording:
                self.record_frames.append(pm.copy())
                if len(self.record_frames) > 300:
                    self.record_frames.pop(0)

            # Compose overlays on an ARGB copy of the frame.
            image = pm.toImage().convertToFormat(QImage.Format_ARGB32)
            painter = QPainter(image)
            painter.setRenderHint(QPainter.Antialiasing)

            # Timestamp overlay ("is not None" so camera 0 keeps its prefix).
            if self.show_timestamp:
                ts = QDateTime.currentDateTime().toString('yyyy-MM-dd hh:mm:ss')
                cam_text = f"Cam {self.cam_id} | {ts}" if self.cam_id is not None else ts

                font = QFont()
                font.setPointSize(11)
                font.setBold(True)
                painter.setFont(font)

                metrics = painter.fontMetrics()
                w = metrics.width(cam_text) + 16
                h = metrics.height() + 10
                rect = QRect(10, 10, w, h)
                painter.setPen(Qt.NoPen)
                painter.setBrush(QBrush(QColor(0, 0, 0, 180)))
                painter.drawRoundedRect(rect, 6, 6)
                painter.setPen(QPen(QColor(255, 255, 255)))
                painter.drawText(rect, Qt.AlignCenter, cam_text)

            # Rule-of-thirds grid plus dashed center lines.
            if self.show_grid:
                painter.setPen(QPen(QColor(255, 255, 255, 120), 2))
                img_w = image.width()
                img_h = image.height()

                for i in range(1, 3):
                    x = int(img_w * i / 3)
                    y = int(img_h * i / 3)
                    painter.drawLine(x, 0, x, img_h)
                    painter.drawLine(0, y, img_w, y)

                painter.setPen(QPen(QColor(255, 255, 0, 100), 1, Qt.DashLine))
                painter.drawLine(img_w // 2, 0, img_w // 2, img_h)
                painter.drawLine(0, img_h // 2, img_w, img_h // 2)

            # Crosshair overlay at the image center.
            if self.show_crosshair:
                painter.setPen(QPen(QColor(255, 0, 0, 200), 2))
                img_w = image.width()
                img_h = image.height()
                center_x = img_w // 2
                center_y = img_h // 2
                size = 30

                painter.drawLine(center_x - size, center_y, center_x + size, center_y)
                painter.drawLine(center_x, center_y - size, center_x, center_y + size)

                painter.setPen(QPen(QColor(255, 0, 0, 150), 1))
                painter.drawEllipse(QPoint(center_x, center_y), 5, 5)

            # Recording indicator (red dot + frame counter, top right).
            if self.recording:
                painter.setPen(Qt.NoPen)
                painter.setBrush(QBrush(QColor(255, 0, 0, 200)))
                painter.drawEllipse(image.width() - 30, 10, 15, 15)

                painter.setPen(QPen(QColor(255, 255, 255)))
                font = QFont()
                font.setPointSize(9)
                font.setBold(True)
                painter.setFont(font)
                painter.drawText(QRect(image.width() - 100, 25, 90, 20),
                                 Qt.AlignRight, f"REC {len(self.record_frames)}")

            painter.end()

            composed = QPixmap.fromImage(image)

            # Apply zoom
            if self.zoom_factor != 1.0:
                target_w = int(composed.width() * self.zoom_factor)
                target_h = int(composed.height() * self.zoom_factor)
                composed = composed.scaled(target_w, target_h, Qt.KeepAspectRatio, Qt.SmoothTransformation)

            self.image_label.setPixmap(composed)
            self.update_cursor()

        except Exception as e:
            print(f"Error in refresh_frame: {e}")

    def update_status(self):
        """Rebuild the status-bar text from the current view state."""
        try:
            zoom_pct = int(self.zoom_factor * 100)
            # "is not None" so camera id 0 is still displayed.
            status_parts = [f"Camera {self.cam_id if self.cam_id is not None else 'View'}", f"Zoom: {zoom_pct}%"]

            if self.paused:
                status_parts.append("PAUSED")
            if self.recording:
                status_parts.append(f"RECORDING ({len(self.record_frames)} frames)")
            if self.enhance_mode != 0:
                enhance_names = ["Off", "Sharpen", "Edges", "Denoise"]
                status_parts.append(f"Enhance: {enhance_names[self.enhance_mode]}")

            self.status_label.setText(" | ".join(status_parts))
        except Exception as e:
            print(f"Error updating status: {e}")

    def zoom_in(self):
        """Zoom in by 30%."""
        self.set_zoom(self.zoom_factor * 1.3)

    def zoom_out(self):
        """Zoom out by 30%."""
        self.set_zoom(self.zoom_factor / 1.3)

    def reset_zoom(self):
        """Reset zoom to 100%."""
        self.set_zoom(1.0)

    def set_zoom(self, z):
        """Clamp *z* to [min_zoom, max_zoom] and re-render if it changed."""
        z = max(self.min_zoom, min(self.max_zoom, z))
        if abs(z - self.zoom_factor) > 1e-4:
            self.zoom_factor = z
            self.refresh_frame()
            self.update_status()
            self.update_cursor()

    def can_pan(self):
        """Return True when the zoomed image overflows the viewport (panning useful)."""
        if not self.image_label.pixmap():
            return False
        vp = self.scroll.viewport().size()
        pm = self.image_label.pixmap().size()
        return pm.width() > vp.width() or pm.height() > vp.height()

    def update_cursor(self):
        """Show open/closed hand while panning is possible, arrow otherwise."""
        if self.can_pan():
            self.image_label.setCursor(Qt.OpenHandCursor if not self.pan_active else Qt.ClosedHandCursor)
        else:
            self.image_label.setCursor(Qt.ArrowCursor)

    def distance(self, p1: QPointF, p2: QPointF) -> float:
        """Euclidean distance between two points (used for pinch gestures)."""
        dx = p2.x() - p1.x()
        dy = p2.y() - p1.y()
        return math.sqrt(dx * dx + dy * dy)

    def event(self, event):
        """Handle touch events: two-finger pinch zoom and one-finger pan."""
        try:
            if event.type() == QEvent.TouchBegin:
                points = event.touchPoints()

                if len(points) == 2:
                    # Two fingers: start pinch, remember baseline distance/zoom.
                    self.gesture_type = 'pinch'
                    p1 = points[0].pos()
                    p2 = points[1].pos()
                    self.pinch_initial_distance = self.distance(p1, p2)
                    self.pinch_initial_zoom = self.zoom_factor

                elif len(points) == 1 and self.can_pan():
                    self.gesture_type = 'pan'
                    self.pan_active = True
                    self.pan_last_pos = points[0].pos()
                    self.update_cursor()

                event.accept()
                return True

            elif event.type() == QEvent.TouchUpdate:
                points = event.touchPoints()

                if self.gesture_type == 'pinch' and len(points) == 2:
                    p1 = points[0].pos()
                    p2 = points[1].pos()
                    current_distance = self.distance(p1, p2)

                    # Ignore degenerate pinches to avoid divide-by-near-zero jumps.
                    if self.pinch_initial_distance > 10:
                        scale_factor = current_distance / self.pinch_initial_distance
                        new_zoom = self.pinch_initial_zoom * scale_factor
                        self.set_zoom(new_zoom)

                elif self.gesture_type == 'pan' and len(points) == 1 and self.can_pan():
                    current_pos = points[0].pos()
                    if self.pan_last_pos is not None:
                        delta = current_pos - self.pan_last_pos
                        hbar = self.scroll.horizontalScrollBar()
                        vbar = self.scroll.verticalScrollBar()
                        hbar.setValue(int(hbar.value() - delta.x()))
                        vbar.setValue(int(vbar.value() - delta.y()))

                    self.pan_last_pos = current_pos

                event.accept()
                return True

            elif event.type() in (QEvent.TouchEnd, QEvent.TouchCancel):
                # Reset all gesture state.
                self.gesture_type = None
                self.pan_active = False
                self.pan_last_pos = None
                self.pinch_initial_distance = 0
                self.update_cursor()

                event.accept()
                return True

        except Exception as e:
            print(f"Error in touch event: {e}")

        return super().event(event)

    def eventFilter(self, obj, event):
        """Handle mouse events on the image/viewport: wheel zoom and drag pan."""
        try:
            if obj is self.image_label or obj is self.scroll.viewport():
                if event.type() == QEvent.Wheel:
                    delta = event.angleDelta().y()
                    if delta > 0:
                        self.zoom_in()
                    else:
                        self.zoom_out()
                    return True

                if event.type() == QEvent.MouseButtonPress and event.button() == Qt.LeftButton:
                    if self.can_pan():
                        self.pan_active = True
                        self.pan_last_pos = event.pos()
                        self.update_cursor()
                        return True

                if event.type() == QEvent.MouseMove and self.pan_active:
                    if self.pan_last_pos is not None:
                        delta = event.pos() - self.pan_last_pos
                        hbar = self.scroll.horizontalScrollBar()
                        vbar = self.scroll.verticalScrollBar()
                        hbar.setValue(hbar.value() - delta.x())
                        vbar.setValue(vbar.value() - delta.y())
                        self.pan_last_pos = event.pos()
                    return True

                if event.type() == QEvent.MouseButtonRelease and event.button() == Qt.LeftButton:
                    if self.pan_active:
                        self.pan_active = False
                        self.pan_last_pos = None
                        self.update_cursor()
                        return True

                if event.type() == QEvent.Leave:
                    # Cancel a pan when the pointer leaves the widget.
                    self.pan_active = False
                    self.pan_last_pos = None
                    self.update_cursor()

        except Exception as e:
            print(f"Error in eventFilter: {e}")

        return super().eventFilter(obj, event)
|
||||
434
mucapy/WebServer.py
Normal file
434
mucapy/WebServer.py
Normal file
@@ -0,0 +1,434 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import time
|
||||
import json
|
||||
import threading
|
||||
from aiohttp import web
|
||||
from aiortc import RTCPeerConnection, RTCSessionDescription, VideoStreamTrack
|
||||
from aiortc.contrib.media import MediaBlackhole
|
||||
from av import VideoFrame
|
||||
import cv2
|
||||
import numpy as np
|
||||
from typing import Dict, Set, Optional, List
|
||||
import fractions
|
||||
|
||||
# Import the CameraThread from your existing code
|
||||
from CameraThread import CameraThread
|
||||
from PyQt5.QtCore import QMutex
|
||||
|
||||
# Module-level logger for the WebRTC server.
# NOTE(review): basicConfig() at import time configures the root logger for
# the whole application (at DEBUG verbosity); consider moving this to the
# program entry point if this module is imported as a library.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@web.middleware
async def cors_middleware(request, handler):
    """Global CORS middleware to ensure preflight (OPTIONS) and all responses include CORS headers."""
    base_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': 'GET,POST,PUT,DELETE,OPTIONS',
        'Access-Control-Allow-Headers': 'Content-Type,Authorization',
    }

    # Preflight: answer immediately with expanded CORS headers.
    if request.method == 'OPTIONS':
        preflight = {
            **base_headers,
            'Access-Control-Max-Age': '3600',
            'Access-Control-Allow-Headers': 'Content-Type, Authorization, X-Requested-With',
            'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE, OPTIONS',
        }
        return web.Response(status=200, headers=preflight)

    try:
        response = await handler(request)
        if response is None:
            response = web.Response(status=204)
        # Inject CORS headers only where the handler didn't set them itself.
        if isinstance(response, web.StreamResponse):
            for header_name, header_value in base_headers.items():
                if header_name not in response.headers:
                    response.headers[header_name] = header_value
        return response
    except Exception as e:
        # Even error responses must carry CORS headers, or the browser
        # hides the real failure behind a CORS error.
        logger.error(f"Error in request handler: {e}")
        failure = web.Response(status=500, text=str(e))
        for header_name, header_value in base_headers.items():
            failure.headers[header_name] = header_value
        return failure
|
||||
|
||||
|
||||
async def favicon_handler(request):
    """Answer browser favicon requests with 204 No Content instead of a 404."""
    return web.Response(status=204)
|
||||
|
||||
|
||||
class VideoStreamTrackFromFrames(VideoStreamTrack):
    """aiortc video track fed from externally supplied OpenCV (BGR) frames.

    The capture side calls :meth:`set_frame` with the latest BGR ndarray;
    aiortc calls :meth:`recv` to pull frames for the WebRTC pipeline.
    While no frame has been supplied yet, a synthetic test pattern is
    streamed so clients can verify connectivity.
    """

    def __init__(self, camera_index: int):
        super().__init__()
        self.camera_index = camera_index
        # Latest BGR frame (numpy ndarray), or None before the first frame.
        self.frame = None
        # Guards self.frame between set_frame() and recv().
        self.frame_lock = asyncio.Lock()

    async def recv(self):
        # aiortc supplies the presentation timestamp for the next frame.
        pts, time_base = await self.next_timestamp()

        async with self.frame_lock:
            if self.frame is None:
                # Create a test pattern to verify the stream is working
                frame = self._create_test_pattern()
            else:
                frame = self.frame

            # Convert BGR to RGB for WebRTC
            # NOTE(review): lock scope reconstructed from a mangled source —
            # the conversion is kept under the lock, which is the safe reading.
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            video_frame = VideoFrame.from_ndarray(frame_rgb, format='rgb24')
            video_frame.pts = pts
            video_frame.time_base = time_base

        return video_frame

    def _create_test_pattern(self):
        """Create a test pattern with camera index for debugging"""
        height, width = 480, 640
        frame = np.zeros((height, width, 3), dtype=np.uint8)

        # Add colored background based on camera index
        colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0)]
        color = colors[self.camera_index % len(colors)]
        frame[:, :] = color

        # Add some text and shapes
        cv2.rectangle(frame, (50, 50), (width-50, height-50), (255, 255, 255), 2)
        cv2.putText(frame, f"Camera {self.camera_index}", (width//4, height//2),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.putText(frame, "Waiting for frames...", (width//4, height//2 + 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 1)
        cv2.putText(frame, f"Time: {time.strftime('%H:%M:%S')}", (width//4, height//2 + 80),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

        return frame

    async def set_frame(self, frame):
        """Store a copy of the latest frame for recv(); invalid input clears it."""
        async with self.frame_lock:
            if frame is not None and isinstance(frame, np.ndarray):
                # Make sure we have a valid frame with proper dimensions
                if len(frame.shape) == 3 and frame.shape[2] == 3:
                    self.frame = frame.copy()
                else:
                    logger.warning(f"Invalid frame format: {frame.shape if hasattr(frame, 'shape') else 'No shape'}")
                    self.frame = None
            else:
                self.frame = None
|
||||
|
||||
|
||||
class WebRTCVideoServer:
|
||||
def __init__(self, main_window=None):
    """Initialize server state and register HTTP routes; does not start listening.

    Args:
        main_window: optional reference to the Qt main window that
            supplies camera frames.
    """
    # One video track per camera index.
    self.tracks: Dict[int, VideoStreamTrackFromFrames] = {}
    # All open peer connections, kept for cleanup.
    self.pcs: Set[RTCPeerConnection] = set()
    self.main_window = main_window
    self.camera_threads: Dict[int, CameraThread] = {}
    self.latest_frames: Dict[int, np.ndarray] = {}
    # Qt mutex guarding frame data shared with the Qt side
    # (presumably latest_frames — confirm against the update path).
    self.frame_lock = QMutex()
    # Camera source identifiers; the URL index selects into this list.
    self.active_camera_sources: List[str] = []
    self.app = web.Application(middlewares=[cors_middleware])
    self.runner = None
    self.site = None
    self.is_running = False
    # Event loop / thread / task handles populated when the server starts.
    self._loop = None
    self._server_thread = None
    self._frame_update_task = None
    # Routes must be registered after self.app is created.
    self._setup_routes()
    logger.info("WebRTCVideoServer initialized")
|
||||
|
||||
def _setup_routes(self):
|
||||
self.app.router.add_post(r'/offer/index{index:[0-9]+}', self.offer_handler)
|
||||
self.app.router.add_post(r'/offer/{index:[0-9]+}', self.offer_handler)
|
||||
self.app.router.add_get('/favicon.ico', favicon_handler)
|
||||
self.app.router.add_get('/status', self.status_handler)
|
||||
logger.info("Routes configured")
|
||||
|
||||
async def status_handler(self, request):
|
||||
return web.json_response({
|
||||
'status': 'running' if self.is_running else 'stopped',
|
||||
'tracks': len(self.tracks),
|
||||
'connections': len(self.pcs),
|
||||
'active_cameras': list(self.tracks.keys()),
|
||||
'camera_sources': self.active_camera_sources
|
||||
})
|
||||
|
||||
async def offer_handler(self, request):
|
||||
headers = {}
|
||||
logger.info(f"New WebRTC offer from {request.remote}")
|
||||
|
||||
try:
|
||||
params = await request.json()
|
||||
sdp = params.get('sdp')
|
||||
offer_type = params.get('type')
|
||||
if not sdp or not offer_type:
|
||||
return web.json_response({'error': 'Missing "sdp" or "type"'}, status=400, headers=headers)
|
||||
|
||||
try:
|
||||
index = int(request.match_info.get('index', 0))
|
||||
except Exception:
|
||||
return web.json_response({'error': 'Invalid camera index'}, status=400, headers=headers)
|
||||
|
||||
# Check if this camera index exists
|
||||
if index >= len(self.active_camera_sources):
|
||||
return web.json_response({'error': f'Camera index {index} not available'}, status=404, headers=headers)
|
||||
|
||||
logger.info(f"WebRTC connection for camera index {index} (Source: {self.active_camera_sources[index]})")
|
||||
|
||||
offer = RTCSessionDescription(sdp=sdp, type=offer_type)
|
||||
pc = RTCPeerConnection()
|
||||
self.pcs.add(pc)
|
||||
|
||||
@pc.on("connectionstatechange")
|
||||
async def on_connectionstatechange():
|
||||
logger.info(f"Connection state: {pc.connectionState}")
|
||||
if pc.connectionState in ("failed", "closed"):
|
||||
await self.cleanup_peer_connection(pc)
|
||||
|
||||
await pc.setRemoteDescription(offer)
|
||||
|
||||
# Create or get track for this camera index
|
||||
if index not in self.tracks:
|
||||
self.tracks[index] = VideoStreamTrackFromFrames(index)
|
||||
logger.info(f"Created track for camera index {index}")
|
||||
|
||||
track = self.tracks[index]
|
||||
|
||||
# Try to attach to existing transceiver first
|
||||
attached = False
|
||||
for t in pc.getTransceivers():
|
||||
if t.kind == 'video':
|
||||
try:
|
||||
if t.sender is not None:
|
||||
await t.sender.replace_track(track)
|
||||
attached = True
|
||||
logger.info("Attached track to existing transceiver")
|
||||
break
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
if not attached:
|
||||
try:
|
||||
pc.addTrack(track)
|
||||
logger.info('Added track via addTrack fallback')
|
||||
except Exception as ex:
|
||||
logger.debug(f'addTrack fallback failed: {ex}')
|
||||
pc.addTransceiver(track, direction='sendonly')
|
||||
|
||||
answer = await pc.createAnswer()
|
||||
await pc.setLocalDescription(answer)
|
||||
|
||||
response_data = {
|
||||
'sdp': pc.localDescription.sdp,
|
||||
'type': pc.localDescription.type
|
||||
}
|
||||
|
||||
logger.info(f"Sending answer for camera index {index}")
|
||||
return web.json_response(response_data, headers=headers)
|
||||
|
||||
except Exception as e:
|
||||
logger.exception("Error handling WebRTC offer")
|
||||
return web.json_response({'error': str(e)}, status=500, headers=headers)
|
||||
|
||||
def setup_cameras(self, camera_sources: List[str]):
|
||||
"""Setup camera threads for WebRTC streaming"""
|
||||
self.stop_cameras() # Stop any existing cameras
|
||||
|
||||
self.active_camera_sources = camera_sources
|
||||
|
||||
for i, source in enumerate(camera_sources):
|
||||
try:
|
||||
# Create camera thread with unique ID (offset by 1000 to avoid conflicts with main app)
|
||||
thread_id = i + 1000
|
||||
thread = CameraThread(thread_id, source, parent=None)
|
||||
thread.set_fps(15) # Set reasonable FPS for streaming
|
||||
thread.frame_ready.connect(self._on_frame_ready)
|
||||
thread.error_occurred.connect(self._on_camera_error)
|
||||
|
||||
self.camera_threads[i] = thread
|
||||
self.latest_frames[i] = None
|
||||
thread.start()
|
||||
|
||||
logger.info(f"WebRTC camera thread started for source {i}: {source}")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to start WebRTC camera thread for source {i}: {e}")
|
||||
|
||||
def _on_frame_ready(self, cam_id, frame):
|
||||
"""Handle frame from camera thread"""
|
||||
# Adjust camera ID back to our index
|
||||
actual_index = cam_id - 1000
|
||||
if 0 <= actual_index < len(self.active_camera_sources):
|
||||
self.frame_lock.lock()
|
||||
try:
|
||||
self.latest_frames[actual_index] = frame.copy()
|
||||
finally:
|
||||
self.frame_lock.unlock()
|
||||
|
||||
def _on_camera_error(self, cam_id, message):
|
||||
"""Handle camera errors"""
|
||||
actual_index = cam_id - 1000
|
||||
logger.error(f"WebRTC camera {actual_index} error: {message}")
|
||||
|
||||
def get_frame(self, index: int) -> Optional[np.ndarray]:
|
||||
"""Get frame for specific camera index"""
|
||||
self.frame_lock.lock()
|
||||
try:
|
||||
frame = self.latest_frames.get(index)
|
||||
if frame is not None:
|
||||
return frame.copy()
|
||||
finally:
|
||||
self.frame_lock.unlock()
|
||||
return None
|
||||
|
||||
def stop_cameras(self):
|
||||
"""Stop all camera threads"""
|
||||
for thread in self.camera_threads.values():
|
||||
try:
|
||||
thread.stop()
|
||||
thread.wait(2000) # Wait up to 2 seconds for thread to finish
|
||||
except Exception as e:
|
||||
logger.error(f"Error stopping camera thread: {e}")
|
||||
|
||||
self.camera_threads.clear()
|
||||
self.latest_frames.clear()
|
||||
logger.info("WebRTC camera threads stopped")
|
||||
|
||||
async def cleanup_peer_connection(self, pc):
|
||||
try:
|
||||
self.pcs.discard(pc)
|
||||
await pc.close()
|
||||
logger.info("Cleaned up peer connection")
|
||||
except Exception as e:
|
||||
logger.error(f"Error cleaning up peer connection: {e}")
|
||||
|
||||
async def start_server(self):
|
||||
"""Async method to start the server"""
|
||||
if self.is_running:
|
||||
logger.info("Server already running")
|
||||
return
|
||||
|
||||
try:
|
||||
logger.info("Starting WebRTC server...")
|
||||
self.runner = web.AppRunner(self.app)
|
||||
await self.runner.setup()
|
||||
self.site = web.TCPSite(self.runner, '0.0.0.0', 1337)
|
||||
await self.site.start()
|
||||
self.is_running = True
|
||||
|
||||
# Start frame update loop
|
||||
self._frame_update_task = asyncio.create_task(self._frame_update_loop())
|
||||
|
||||
logger.info("WebRTC server started successfully on port 1337")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to start server: {e}")
|
||||
self.is_running = False
|
||||
raise
|
||||
|
||||
async def _frame_update_loop(self):
|
||||
"""Background task to update tracks with frames from camera threads"""
|
||||
while self.is_running:
|
||||
try:
|
||||
# Update each track with its corresponding camera frame
|
||||
for index in self.tracks.keys():
|
||||
if index < len(self.active_camera_sources):
|
||||
frame = self.get_frame(index)
|
||||
if frame is not None and isinstance(frame, np.ndarray):
|
||||
if len(frame.shape) == 3 and frame.shape[2] == 3:
|
||||
# Check if frame is not just a black/empty frame
|
||||
if frame.mean() > 10:
|
||||
await self.tracks[index].set_frame(frame)
|
||||
else:
|
||||
logger.debug(f"Frame from camera {index} appears to be black/empty")
|
||||
|
||||
# Sleep to match camera FPS
|
||||
await asyncio.sleep(0.033) # ~30 FPS
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in frame update loop: {e}")
|
||||
await asyncio.sleep(1) # Wait longer on error
|
||||
|
||||
async def stop_server(self):
|
||||
"""Async method to stop the server"""
|
||||
logger.info("Stopping WebRTC server...")
|
||||
self.is_running = False
|
||||
|
||||
# Stop frame update task
|
||||
if self._frame_update_task:
|
||||
self._frame_update_task.cancel()
|
||||
try:
|
||||
await self._frame_update_task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
self._frame_update_task = None
|
||||
|
||||
# Stop camera threads
|
||||
self.stop_cameras()
|
||||
|
||||
try:
|
||||
# Close all peer connections
|
||||
close_tasks = [self.cleanup_peer_connection(pc) for pc in list(self.pcs)]
|
||||
if close_tasks:
|
||||
await asyncio.gather(*close_tasks, return_exceptions=True)
|
||||
|
||||
if self.site:
|
||||
await self.site.stop()
|
||||
if self.runner:
|
||||
await self.runner.cleanup()
|
||||
|
||||
# Clear all tracks
|
||||
self.tracks.clear()
|
||||
self.active_camera_sources.clear()
|
||||
|
||||
logger.info("WebRTC server stopped")
|
||||
except Exception as e:
|
||||
logger.error(f"Error stopping server: {e}")
|
||||
|
||||
def start(self, camera_sources: List[str] = None):
|
||||
"""Synchronous start method for main thread"""
|
||||
if self.is_running:
|
||||
logger.info("Server already running")
|
||||
return
|
||||
|
||||
# Setup cameras if sources provided
|
||||
if camera_sources:
|
||||
self.setup_cameras(camera_sources)
|
||||
|
||||
def run_server():
|
||||
self._loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self._loop)
|
||||
try:
|
||||
self._loop.run_until_complete(self.start_server())
|
||||
self._loop.run_forever()
|
||||
except Exception as e:
|
||||
logger.error(f"Server thread error: {e}")
|
||||
self.is_running = False
|
||||
finally:
|
||||
if self._loop.is_running():
|
||||
self._loop.stop()
|
||||
self._loop.close()
|
||||
|
||||
self._server_thread = threading.Thread(target=run_server, daemon=True)
|
||||
self._server_thread.start()
|
||||
|
||||
# Wait briefly for server to initialize
|
||||
import time
|
||||
for _ in range(10):
|
||||
if self.is_running:
|
||||
break
|
||||
time.sleep(0.1)
|
||||
|
||||
def stop(self):
|
||||
"""Synchronous stop method for main thread"""
|
||||
if not self.is_running or not self._loop:
|
||||
return
|
||||
|
||||
try:
|
||||
# Schedule stop in server thread's event loop
|
||||
future = asyncio.run_coroutine_threadsafe(self.stop_server(), self._loop)
|
||||
future.result(timeout=5)
|
||||
except Exception as e:
|
||||
logger.error(f"Error stopping server: {e}")
|
||||
finally:
|
||||
self._server_thread = None
|
||||
self._loop = None
|
||||
468
mucapy/YoloClass.py
Normal file
468
mucapy/YoloClass.py
Normal file
@@ -0,0 +1,468 @@
|
||||
import numpy as np
|
||||
import cv2
|
||||
import time
|
||||
import platform
|
||||
import os
|
||||
import subprocess
|
||||
from PyQt5.QtCore import Qt, QTimer, QDir, QSize, QDateTime, QRect, QThread, pyqtSignal, QMutex, QObject, QEvent
|
||||
from PyQt5.QtGui import (QImage, QPixmap, QIcon, QColor, QKeySequence, QPainter,
|
||||
QPen, QBrush)
|
||||
from PyQt5.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QHBoxLayout,
|
||||
QWidget, QLabel, QPushButton, QComboBox, QSpinBox,
|
||||
QFileDialog, QMessageBox, QMenu, QAction, QActionGroup, QGridLayout, QGroupBox,
|
||||
QDockWidget, QScrollArea, QToolButton, QDialog,
|
||||
QShortcut, QListWidget, QFormLayout, QLineEdit,
|
||||
QCheckBox, QTabWidget, QListWidgetItem, QSplitter,
|
||||
QProgressBar, QSizePolicy)
|
||||
|
||||
from CameraThread import CameraThread
|
||||
from Config import Config
|
||||
import sys
|
||||
from CameraScanThread import CameraScanThread
|
||||
class MultiCamYOLODetector(QObject):
    """Owns camera discovery, per-camera capture threads, and YOLO inference.

    Frames are produced by CameraThread workers and cached under a
    QMutex; get_frames() returns snapshots (optionally with detections
    drawn) without blocking the GUI thread. Model files are plain
    Darknet/ONNX artifacts loaded through cv2.dnn.
    """

    # Emitted when a background scan finishes.
    cameras_scanned = pyqtSignal(list, dict)  # Emits (available_cameras, index_to_name)

    def __init__(self, parent=None):
        super().__init__(parent)
        self.cameras = []  # identifiers/paths of currently connected cameras
        self.camera_threads = {}  # Dictionary to store camera threads
        self.net = None  # cv2.dnn network; None until load_yolo_model succeeds
        self.classes = []  # class names read from the .names file
        self.colors = []  # per-class BGR colors for drawing boxes
        self.target_fps = 10
        self.last_frame_time = 0
        self.frame_interval = 1.0 / self.target_fps
        self.available_cameras = []
        self.model_dir = ""
        self.cuda_available = self.check_cuda()
        self.config = Config()
        self.latest_frames = {}  # Store latest frames from each camera
        self.frame_lock = QMutex()  # Mutex for thread-safe frame access
        self.scan_thread = None  # Background scanner thread
        self.camera_names = {}  # Mapping index->friendly name (best effort)

        # Load settings
        self.confidence_threshold = self.config.load_setting('confidence_threshold', 0.35)
        self.network_cameras = self.config.load_setting('network_cameras', {})
        self.target_fps = self.config.load_setting('last_fps', 10)
        self.frame_interval = 1.0 / self.target_fps

        # Load last used model if available
        last_model = self.config.load_setting('last_model_dir')
        if last_model and os.path.exists(last_model):
            self.load_yolo_model(last_model)

    def check_cuda(self):
        """Check if CUDA is available"""
        try:
            count = cv2.cuda.getCudaEnabledDeviceCount()
            return count > 0
        except:
            # OpenCV built without CUDA raises here; treat as no CUDA.
            return False

    def add_network_camera(self, name, url):
        """Add a network camera to the saved list"""
        self.network_cameras[name] = url
        self.config.save_setting('network_cameras', self.network_cameras)

    def remove_network_camera(self, name):
        """Remove a network camera from the saved list"""
        if name in self.network_cameras:
            del self.network_cameras[name]
            self.config.save_setting('network_cameras', self.network_cameras)

    def get_platform_backend(self):
        """Get appropriate video capture backend for current platform"""
        try:
            if sys.platform.startswith('win'):
                return cv2.CAP_DSHOW
            elif sys.platform.startswith('darwin'):
                return cv2.CAP_AVFOUNDATION
            else:
                return cv2.CAP_V4L2
        except Exception:
            # Fallback to auto-detect if constants are missing
            return cv2.CAP_ANY

    def get_camera_names_windows(self, cams):
        """Get camera names on Windows using DirectShow (COM)."""
        names = {}

        import platform
        if platform.system().lower() != "windows":
            # Non-Windows: no friendly names available, map everything to None.
            for c in cams:
                names[c] = None
            return names

        try:
            import comtypes
            from comtypes import GUID, POINTER, HRESULT, COMMETHOD, BSTR
            from ctypes import c_ulong, byref
            from comtypes.automation import VARIANT

            # GUIDs
            CLSID_SystemDeviceEnum = GUID("{62BE5D10-60EB-11D0-BD3B-00A0C911CE86}")
            CLSID_VideoInputDeviceCategory = GUID("{860BB310-5D01-11D0-BD3B-00A0C911CE86}")
            IID_ICreateDevEnum = GUID("{29840822-5B84-11D0-BD3B-00A0C911CE86}")
            IID_IPropertyBag = GUID("{55272A00-42CB-11CE-8135-00AA004BB851}")

            # Interfaces — minimal hand-declared COM vtables, just enough
            # to enumerate video input devices and read FriendlyName.
            class IEnumMoniker(comtypes.IUnknown):
                _iid_ = GUID("{00000102-0000-0000-C000-000000000046}")
                _methods_ = [
                    COMMETHOD([], HRESULT, 'Next',
                              (['in'], c_ulong, 'celt'),
                              (['out'], POINTER(POINTER(comtypes.IUnknown)), 'rgelt'),
                              (['out'], POINTER(c_ulong), 'pceltFetched')),
                ]

            class IPropertyBag(comtypes.IUnknown):
                _iid_ = IID_IPropertyBag
                _methods_ = [
                    COMMETHOD([], HRESULT, 'Read',
                              (['in'], BSTR, 'pszPropName'),
                              (['in', 'out'], POINTER(VARIANT), 'pVar'),
                              (['in'], POINTER(comtypes.IUnknown), 'pErrorLog')),
                ]

            class ICreateDevEnum(comtypes.IUnknown):
                _iid_ = IID_ICreateDevEnum
                _methods_ = [
                    COMMETHOD([], HRESULT, "CreateClassEnumerator",
                              (['in'], POINTER(GUID), 'clsidDeviceClass'),
                              (['out'], POINTER(POINTER(IEnumMoniker)), 'ppEnumMoniker'))
                ]

            comtypes.CoInitialize()
            dev_enum = comtypes.CoCreateInstance(
                CLSID_SystemDeviceEnum,
                interface=ICreateDevEnum
            )

            enum_moniker = POINTER(IEnumMoniker)()
            hr = dev_enum.CreateClassEnumerator(
                CLSID_VideoInputDeviceCategory,  # pass GUID directly, no byref
                byref(enum_moniker)  # output pointer is byref
            )

            if hr != 0 or not enum_moniker:
                raise RuntimeError("No video devices found")

            device_names = []
            fetched = c_ulong()
            moniker = POINTER(comtypes.IUnknown)()
            while enum_moniker.Next(1, byref(moniker), byref(fetched)) == 0:
                prop_bag = moniker.BindToStorage(None, None, IPropertyBag)
                if prop_bag:
                    name_var = VARIANT()
                    if prop_bag.Read("FriendlyName", byref(name_var), None) == 0:
                        device_names.append(str(name_var.value))
                moniker = POINTER(comtypes.IUnknown)()  # release

            # map to cams
            # NOTE(review): assumes DirectShow enumeration order matches
            # OpenCV's numeric camera indices — usually true, not guaranteed.
            idx_only = [c for c in cams if not c.startswith("net:") and not c.startswith("/dev/")]
            for i, cam in enumerate(idx_only):
                names[cam] = device_names[i] if i < len(device_names) else None

        except Exception as e:
            print(f"get_camera_names_windows failed: {e}")
            for c in cams:
                names[c] = None

        return names

    def start_camera_scan(self, max_to_check=10):
        """Start background camera scan; emits cameras_scanned when done."""
        try:
            if self.scan_thread and self.scan_thread.isRunning():
                # Already scanning; ignore
                return False
            self.scan_thread = CameraScanThread(self, max_to_check)
            self.scan_thread.scan_finished.connect(self._on_scan_finished)
            self.scan_thread.start()
            return True
        except Exception as e:
            print(f"Failed to start camera scan: {e}")
            return False

    def _on_scan_finished(self, cams, names):
        # Store and forward via public signal
        self.available_cameras = cams or []
        self.camera_names = names or {}
        self.cameras_scanned.emit(self.available_cameras, self.camera_names)

    def scan_for_cameras_windows(self, max_to_check=10):
        """Enhanced camera detection for Windows with multiple backend support"""
        windows_cameras = []
        backends_to_try = [
            (cv2.CAP_DSHOW, "DSHOW"),
            (cv2.CAP_MSMF, "MSMF"),
            (cv2.CAP_ANY, "ANY")
        ]
        for backend, backend_name in backends_to_try:
            print(f"Trying {backend_name} backend...")
            for i in range(max_to_check):
                try:
                    cap = cv2.VideoCapture(i, backend)
                    if cap.isOpened():
                        # Require an actual frame, not just a successful open.
                        ret, frame = cap.read()
                        if ret and frame is not None:
                            camera_id = f"{backend_name.lower()}:{i}"
                            if str(i) not in windows_cameras:
                                windows_cameras.append(str(i))
                                print(f"Found camera {i} via {backend_name}")
                        cap.release()
                    else:
                        cap.release()
                except Exception as e:
                    print(f"Error checking camera {i} with {backend_name}: {e}")
                    continue
        return windows_cameras

    def scan_for_cameras(self, max_to_check=10):
        """Check for available cameras with platform-specific backends"""
        self.available_cameras = []

        print(f"Scanning for cameras on {sys.platform}...")

        # Platform-specific detection
        if sys.platform.startswith('win'):
            cameras_found = self.scan_for_cameras_windows(max_to_check)
            self.available_cameras.extend(cameras_found)
        else:
            # Linux/Unix/macOS detection
            backend = cv2.CAP_AVFOUNDATION if sys.platform.startswith('darwin') else cv2.CAP_V4L2
            for i in range(max_to_check):
                try:
                    cap = cv2.VideoCapture(i, backend)
                    if cap.isOpened():
                        ret, frame = cap.read()
                        if ret and frame is not None:
                            self.available_cameras.append(str(i))
                        cap.release()
                except Exception as e:
                    print(f"Error checking camera {i}: {e}")
                    continue

            # Linux device paths
            if sys.platform.startswith('linux'):
                v4l_paths = [f"/dev/video{i}" for i in range(max_to_check)]
                for path in v4l_paths:
                    if os.path.exists(path):
                        try:
                            cap = cv2.VideoCapture(path, cv2.CAP_V4L2)
                            if cap.isOpened() and path not in self.available_cameras:
                                self.available_cameras.append(path)
                            cap.release()
                        except Exception as e:
                            print(f"Error checking device {path}: {e}")

        # Add network cameras
        network_count = 0
        for name, url in self.network_cameras.items():
            self.available_cameras.append(f"net:{name}")
            network_count += 1

        print(
            f"Scan complete: Found {len(self.available_cameras) - network_count} local and {network_count} network cameras")
        return self.available_cameras

    def load_yolo_model(self, model_dir):
        """Load YOLO model from selected directory with better error handling"""
        self.model_dir = model_dir
        try:
            # Find model files in the directory
            weights = [f for f in os.listdir(model_dir) if f.endswith(('.weights', '.onnx'))]
            configs = [f for f in os.listdir(model_dir) if f.endswith('.cfg')]
            classes = [f for f in os.listdir(model_dir) if f.endswith('.names')]

            if not weights or not configs or not classes:
                return False

            # Use the first found files
            weights_path = os.path.join(model_dir, weights[0])
            config_path = os.path.join(model_dir, configs[0])
            classes_path = os.path.join(model_dir, classes[0])

            self.net = cv2.dnn.readNet(weights_path, config_path)

            # Set backend based on availability
            if self.cuda_available:
                try:
                    self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
                    self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
                except:
                    # Fall back to CPU if CUDA fails
                    self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
                    self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
            else:
                self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
                self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

            try:
                with open(classes_path, 'r') as f:
                    self.classes = f.read().strip().split('\n')
            except FileNotFoundError:
                # NOTE(review): silently keeps the previous class list if
                # the .names file vanished between listdir and open.
                pass

            # Fixed seed keeps box colors stable across runs.
            np.random.seed(42)
            self.colors = np.random.randint(0, 255, size=(len(self.classes), 3), dtype='uint8')
            return True
        except Exception as e:
            print(f"Error loading YOLO model: {e}")
            return False

    def connect_cameras(self, camera_paths):
        """Connect to multiple cameras using background threads for smooth UI"""
        self.disconnect_cameras()

        # Prepare internal state
        self.cameras = []  # store identifiers/paths only
        self.latest_frames = {}

        # Start one CameraThread per camera
        for cam_index, cam_path in enumerate(camera_paths):
            try:
                thread = CameraThread(cam_index, cam_path, parent=self.parent())
                thread.set_fps(self.target_fps)
                thread.frame_ready.connect(self._on_frame_ready)
                thread.error_occurred.connect(self._on_camera_error)
                self.camera_threads[cam_index] = thread
                self.cameras.append(cam_path)
                self.latest_frames[cam_index] = None
                thread.start()
                print(f"Started camera thread for {cam_path}")
            except Exception as e:
                print(f"Error starting camera thread for {cam_path}: {e}")

        success_count = len(self.camera_threads)
        print(f"Camera connection summary: {success_count}/{len(camera_paths)} camera threads started")
        return success_count > 0

    def disconnect_cameras(self):
        """Disconnect all cameras (stop threads)"""
        # Stop and remove threads
        for idx, thread in list(self.camera_threads.items()):
            try:
                thread.stop()
            except Exception:
                pass
            try:
                thread.deleteLater()
            except Exception:
                pass
        self.camera_threads.clear()
        self.cameras = []
        # Clear cached frames
        self.frame_lock.lock()
        try:
            self.latest_frames = {}
        finally:
            self.frame_lock.unlock()

    def _on_frame_ready(self, cam_id, frame):
        """Cache latest frame from a camera thread (non-blocking for UI)."""
        self.frame_lock.lock()
        try:
            # Store a copy to avoid data races if producer reuses buffers
            self.latest_frames[cam_id] = frame.copy()
        finally:
            self.frame_lock.unlock()

    def _on_camera_error(self, cam_id, message):
        # Currently log-only; the thread itself handles reconnection (if any).
        print(f"Camera {cam_id} error: {message}")

    def get_frames(self):
        """Return latest frames without blocking the GUI thread."""
        frames = []
        # Snapshot current frames under lock
        self.frame_lock.lock()
        try:
            for i, _ in enumerate(self.cameras):
                frm = self.latest_frames.get(i)
                if frm is None:
                    # Placeholder black frame keeps list positions aligned
                    # with camera indices before the first capture arrives.
                    frames.append(np.zeros((720, 1280, 3), dtype=np.uint8))
                else:
                    frames.append(frm.copy())
        finally:
            self.frame_lock.unlock()

        # Optionally run detection on the copies
        parent_window = self.parent()
        if parent_window and self.net is not None and parent_window.detection_enabled:
            processed = []
            for f in frames:
                try:
                    processed.append(self.get_detections(f))
                except Exception:
                    processed.append(f)
            return processed

        return frames

    def get_detections(self, frame):
        """Perform YOLO object detection on a frame with error handling"""
        if self.net is None:
            return frame

        try:
            blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
            self.net.setInput(blob)

            # Get output layer names compatible with different OpenCV versions
            try:
                layer_names = self.net.getLayerNames()
                output_layers = [layer_names[i - 1] for i in self.net.getUnconnectedOutLayers()]
            except:
                output_layers = self.net.getUnconnectedOutLayersNames()

            outputs = self.net.forward(output_layers)

            boxes = []
            confidences = []
            class_ids = []

            for output in outputs:
                for detection in output:
                    # detection layout: [cx, cy, w, h, objectness, class scores...]
                    scores = detection[5:]
                    class_id = np.argmax(scores)
                    confidence = scores[class_id]

                    if confidence > self.confidence_threshold:  # Use configurable threshold
                        box = detection[0:4] * np.array([frame.shape[1], frame.shape[0],
                                                         frame.shape[1], frame.shape[0]])
                        (centerX, centerY, width, height) = box.astype('int')
                        x = int(centerX - (width / 2))
                        y = int(centerY - (height / 2))

                        boxes.append([x, y, int(width), int(height)])
                        confidences.append(float(confidence))
                        class_ids.append(class_id)

            # Non-maximum suppression collapses overlapping candidate boxes.
            indices = cv2.dnn.NMSBoxes(boxes, confidences, self.confidence_threshold, 0.4)

            person_detected = False
            if len(indices) > 0:
                for i in indices.flatten():
                    (x, y, w, h) = boxes[i]
                    color = [int(c) for c in self.colors[class_ids[i]]]
                    cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                    cls_name = self.classes[class_ids[i]] if 0 <= class_ids[i] < len(self.classes) else str(
                        class_ids[i])
                    text = f"{cls_name}: {confidences[i]:.2f}"
                    cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
                    if not person_detected and str(cls_name).lower() == 'person':
                        person_detected = True
            # Auto-trigger alert if a person is detected on any camera and alerts are enabled
            try:
                if person_detected:
                    parent_window = self.parent()
                    if parent_window is not None:
                        # trigger_alert() has its own internal guards (enabled, cooldown, playing)
                        parent_window.trigger_alert()
            except Exception:
                pass
        except Exception as e:
            print(f"Detection error: {e}")

        return frame
|
||||
46
mucapy/compile.py
Normal file
46
mucapy/compile.py
Normal file
@@ -0,0 +1,46 @@
|
||||
"""Build a single-file executable of mucapy with PyInstaller.

Converts the PNG logo to an ICO, locates PyQt5's 'platforms' plugin
folder, and invokes PyInstaller with the minimal data files needed.
"""
import os
from PIL import Image
import PyInstaller.__main__
import PyQt5

# Paths
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
MAIN_SCRIPT = os.path.join(ROOT_DIR, "main.py")
STYLING_DIR = os.path.join(ROOT_DIR, "styling")

# Icon paths
PNG_ICON = os.path.join(STYLING_DIR, "logo.png")
ICO_ICON = os.path.join(STYLING_DIR, "logo.ico")

# Convert PNG to ICO (PyInstaller's --icon needs .ico on Windows)
img = Image.open(PNG_ICON)
img.save(ICO_ICON, format="ICO", sizes=[(256, 256), (128, 128), (64, 64), (32, 32), (16, 16)])
print(f"Converted {PNG_ICON} to {ICO_ICON}")

# Detect PyQt5 platforms folder automatically
pyqt_dir = os.path.dirname(PyQt5.__file__)
platforms_path = None

# Walk recursively to find the 'platforms' folder
for root, dirs, files in os.walk(pyqt_dir):
    if 'platforms' in dirs:
        platforms_path = os.path.join(root, 'platforms')
        break

if platforms_path is None or not os.path.exists(platforms_path):
    raise FileNotFoundError(f"Could not locate PyQt5 'platforms' folder under {pyqt_dir}")

print(f"Using PyQt5 platforms folder: {platforms_path}")

# Build EXE with PyInstaller.
# BUG FIX: the --add-data source/dest separator is platform dependent
# (';' on Windows, ':' on POSIX). The previous hard-coded ';' broke
# builds on Linux/macOS; os.pathsep produces the right one everywhere.
PyInstaller.__main__.run([
    MAIN_SCRIPT,
    '--noconfirm',
    '--onefile',
    '--windowed',
    f'--icon={ICO_ICON}',
    # Only include the platforms folder (minimal requirement for PyQt5)
    '--add-data', f'{platforms_path}{os.pathsep}PyQt5/Qt/plugins/platforms',
])

print("Build complete! Check the 'dist' folder for the executable.")
|
||||
51
mucapy/initqt.py
Normal file
51
mucapy/initqt.py
Normal file
@@ -0,0 +1,51 @@
|
||||
import os
|
||||
import platform
|
||||
|
||||
class initQT:
    """Quality-of-life environment setup for running the Qt app on Linux.

    Captures and re-exports the XDG session type (X11 vs Wayland) so Qt
    selects the correct platform plugin, and quiets OpenCV's log spam.
    Windows configures its platform plugin automatically, so every
    method is effectively a no-op there.
    """

    def __init__(self):
        # Qt session type ("x11"/"wayland"); populated by getenv() on Linux.
        self.session_type = None
        # Snapshot of the environment, kept for cv2 subprocess use.
        self.env = os.environ.copy()

    def getenv(self):
        """Read XDG_SESSION_TYPE on Linux and cache it.

        Returns the session type string, or None when not on Linux or
        when the variable is unset.

        BUG FIX: the warning used to print only on *non-Linux* systems
        (where XDG_SESSION_TYPE never applies) and never on Linux when
        the variable was genuinely missing. It now warns exactly when
        it matters.
        """
        if platform.system() == "Linux":
            self.session_type = os.getenv("XDG_SESSION_TYPE")
            if self.session_type is None:
                print(
                    "No XDG Session Type found!\n"
                    "Run 'echo $XDG_SESSION_TYPE' in bash to inspect your session."
                )
        return self.session_type

    def setenv(self):
        """Export the previously captured session type back into the environment.

        Prints a hint (with the manual export command) when nothing was
        captured, instead of raising.
        """
        if self.session_type:
            os.environ["XDG_SESSION_TYPE"] = self.session_type
        else:
            print(
                "Setting the XDG_SESSION_TYPE failed!\n"
                f"Run 'export XDG_SESSION_TYPE={self.session_type}' in bash."
            )

    @staticmethod
    def shutupCV():
        """Silence OpenCV info/warning log output on Linux.

        NOTE: only effective if called before cv2 is first imported.
        """
        if platform.system() == "Linux":
            os.environ["OPENCV_LOG_LEVEL"] = "ERROR"
|
||||
2230
mucapy/main.py
2230
mucapy/main.py
File diff suppressed because it is too large
Load Diff
BIN
mucapy/styling/logo.ico
Normal file
BIN
mucapy/styling/logo.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 18 KiB |
BIN
mucapy/styling/sound/alert.wav
Normal file
BIN
mucapy/styling/sound/alert.wav
Normal file
Binary file not shown.
97
mucapy/utility.py
Normal file
97
mucapy/utility.py
Normal file
@@ -0,0 +1,97 @@
|
||||
import os
|
||||
import platform
|
||||
try:
|
||||
import winreg
|
||||
except ImportError:
|
||||
pass
|
||||
import ctypes
|
||||
from PyQt5.QtWidgets import QWidget, QApplication
|
||||
from PyQt5.QtCore import QEvent
|
||||
|
||||
class conversion:
    """Helpers for turning raw byte counts into human-readable strings."""

    _symbols = ("B", "KiB", "MiB", "GiB", "TiB", "PiB")
    _thresholds = [1 << (10 * i) for i in range(len(_symbols))]

    @staticmethod
    def bytes_to_human(n: int) -> str:
        """Format *n* bytes as e.g. ``'1.50 KiB'``.

        Non-numeric input is echoed back via ``str()`` instead of raising.
        """
        try:
            n = int(n)
        except Exception:
            # Not coercible to an integer — just return its string form.
            return str(n)

        if n < 1024:
            return f"{n} B"

        # Each power of 1024 adds exactly 10 bits, so the bit length picks
        # the unit directly without scanning the threshold table.
        idx = min(len(conversion._thresholds) - 1, (n.bit_length() - 1) // 10)
        scaled = n / conversion._thresholds[idx]
        unit = conversion._symbols[idx]

        # More digits before the decimal point -> fewer after it.
        decimals = 0 if scaled >= 100 else (1 if scaled >= 10 else 2)
        return f"{scaled:.{decimals}f} {unit}"
|
||||
|
||||
class getpath:
    """Path helpers for locating files bundled alongside this module."""

    @staticmethod
    def resource_path(relative_path: str):
        """Resolve *relative_path* against the directory containing this module."""
        here = os.path.dirname(os.path.abspath(__file__))
        return os.path.join(here, relative_path)
|
||||
|
||||
class windows:
    """Windows-specific UI helpers: dark-mode detection and titlebar theming."""

    @staticmethod
    def is_windows_darkmode() -> bool:
        """Return True when Windows apps are configured for dark mode.

        Always False on non-Windows platforms, or when the registry key
        cannot be read.
        """
        if platform.system() != "Windows":
            return False

        personalize = r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize"
        try:
            with winreg.OpenKey(winreg.HKEY_CURRENT_USER, personalize) as key:
                # AppsUseLightTheme: 0 = dark mode, 1 = light mode
                light_theme, _ = winreg.QueryValueEx(key, "AppsUseLightTheme")
        except Exception as e:
            print(f"Could not read Windows registry for dark mode: {e}")
            return False
        return light_theme == 0

    @staticmethod
    def set_dark_titlebar(widget: QWidget):
        """Apply dark titlebar on Windows to any top-level window."""
        if platform.system() != "Windows":
            return
        if not widget.isWindow():  # only top-level windows
            return
        if not windows.is_windows_darkmode():
            return

        try:
            hwnd = int(widget.winId())
            enabled = ctypes.c_int(1)

            def _apply(attribute: int) -> int:
                # DwmSetWindowAttribute returns 0 (S_OK) on success.
                return ctypes.windll.dwmapi.DwmSetWindowAttribute(
                    hwnd,
                    attribute,
                    ctypes.byref(enabled),
                    ctypes.sizeof(enabled),
                )

            # Attribute 20 is DWMWA_USE_IMMERSIVE_DARK_MODE on current
            # builds; older Windows 10 builds used 19, so fall back to it.
            if _apply(20) != 0:
                _apply(19)
        except Exception as e:
            print("Failed to set dark titlebar:", e)
|
||||
|
||||
class darkmodechildren(QApplication):
    """QApplication subclass that dark-themes every new top-level window.

    Hooks global event dispatch: whenever a top-level widget (re)acquires a
    native window handle, the Windows dark titlebar attribute is re-applied
    to that handle.
    """

    def notify(self, receiver, event):
        # A WinIdChange on a top-level widget means a fresh native handle
        # exists, so the titlebar styling must be applied again.
        is_toplevel_window = isinstance(receiver, QWidget) and receiver.isWindow()
        if is_toplevel_window and event.type() == QEvent.WinIdChange:
            windows.set_dark_titlebar(receiver)
        return super().notify(receiver, event)
|
||||
@@ -4,3 +4,6 @@ PyQt5==5.15.11
|
||||
requests==2.32.3
|
||||
psutil==7.0.0
|
||||
pytest==8.4.0
|
||||
comtypes==1.4.13
|
||||
rtsp==1.1.12
|
||||
#pynvcodec==0.0.6
|
||||
|
||||
74
tac.html
Normal file
74
tac.html
Normal file
@@ -0,0 +1,74 @@
|
||||
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>WebRTC Test</title>
</head>
<body>
    <h1>WebRTC Test (Camera 0)</h1>
    <video id="remoteVideo" autoplay playsinline></video>
    <script>
        // Minimal WebRTC receive-only client: asks the local server for
        // camera 0 and renders the incoming track in the <video> element.
        async function startWebRTC() {
            const pc = new RTCPeerConnection();
            // Request to receive a remote video track from the server.
            // Without this recvonly transceiver the generated offer may contain no
            // video m= section and the server cannot add a sendonly video track
            // (aiortc will fail to compute directions). Adding a recvonly
            // transceiver signals the client wants to receive video.
            try {
                pc.addTransceiver('video', {direction: 'recvonly'});
            } catch (e) {
                // Some browsers / older APIs may use a different signature; ignore failure
                console.debug('addTransceiver failed, continuing:', e);
            }

            // Create video element
            const remoteVideo = document.getElementById('remoteVideo');
            pc.ontrack = (event) => {
                console.log('ontrack event:', event);
                remoteVideo.srcObject = event.streams[0];
                // Log track info
                const tracks = event.streams[0].getTracks();
                tracks.forEach(t => console.log('Remote track:', t.kind, t.readyState, t.enabled));
                // Try to play the video programmatically (some browsers require a gesture for autoplay with audio)
                remoteVideo.play().then(()=>{
                    console.log('remoteVideo.play() succeeded');
                }).catch(err=>{
                    console.warn('remoteVideo.play() failed:', err);
                });
            };

            try {
                // Create a data channel for testing (optional)
                const channel = pc.createDataChannel("testChannel");
                channel.onopen = () => console.log("Data channel open");
                channel.onmessage = (e) => console.log("Received message:", e.data);

                // Create SDP offer
                const offer = await pc.createOffer();
                await pc.setLocalDescription(offer);

                // Send offer to server
                const response = await fetch('http://localhost:1337/offer/0', {
                    method: 'POST',
                    headers: { 'Content-Type': 'application/json' },
                    body: JSON.stringify({ sdp: offer.sdp, type: offer.type })
                });

                // Fail fast on HTTP errors; fetch() does not reject on 4xx/5xx,
                // so without this check a server error would only surface later
                // as a confusing JSON parse / setRemoteDescription failure.
                if (!response.ok) {
                    throw new Error(`Offer request failed: ${response.status} ${response.statusText}`);
                }

                const answer = await response.json();
                console.log("Answer received from server:", answer);

                // Set remote description
                await pc.setRemoteDescription(answer);

            } catch (err) {
                console.error("WebRTC error:", err);
            }
        }

        // Start WebRTC on page load
        startWebRTC();
    </script>
</body>
</html>
|
||||
|
||||
16
test_webrtc.py
Normal file
16
test_webrtc.py
Normal file
@@ -0,0 +1,16 @@
|
||||
import asyncio
|
||||
from mucapy.WebServer import VideoWebServer
|
||||
|
||||
async def main():
    """Spin up the WebRTC video server and keep it alive until interrupted.

    The server is always stopped on the way out. Under ``asyncio.run`` a
    Ctrl+C cancels the main task, so the coroutine sees
    ``asyncio.CancelledError`` — the original ``except KeyboardInterrupt``
    never fired and ``stop_server()`` was never awaited.
    """
    server = VideoWebServer()
    await server.start_server()
    print("Server started, press Ctrl+C to stop")
    try:
        # Idle until cancelled (Ctrl+C) or interrupted.
        while True:
            await asyncio.sleep(1)
    except (KeyboardInterrupt, asyncio.CancelledError):
        # CancelledError is how asyncio.run delivers Ctrl+C to the task;
        # KeyboardInterrupt is kept for loops that raise it directly.
        print("\nStopping server...")
    finally:
        # Guarantee shutdown no matter what unwound the loop.
        await server.stop_server()


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        # asyncio.run re-raises KeyboardInterrupt after cancelling the task;
        # suppress the traceback for a clean exit.
        pass
|
||||
Reference in New Issue
Block a user