this work now

commit e4d07f3d6f
parent 16370d9a74
Author: rattatwinko
Date:   2025-05-23 19:30:28 +02:00

5 changed files with 678 additions and 132 deletions

fedora_setup.sh: mode changed, Normal file → Executable file (0 lines changed)

opencam/coco.names: new file (80 lines)

@@ -0,0 +1,80 @@
person
bicycle
car
motorbike
aeroplane
bus
train
truck
boat
traffic light
fire hydrant
stop sign
parking meter
bench
bird
cat
dog
horse
sheep
cow
elephant
bear
zebra
giraffe
backpack
umbrella
handbag
tie
suitcase
frisbee
skis
snowboard
sports ball
kite
baseball bat
baseball glove
skateboard
surfboard
tennis racket
bottle
wine glass
cup
fork
knife
spoon
bowl
banana
apple
sandwich
orange
broccoli
carrot
hot dog
pizza
donut
cake
chair
sofa
pottedplant
bed
diningtable
toilet
tvmonitor
laptop
mouse
remote
keyboard
cell phone
microwave
oven
toaster
sink
refrigerator
book
clock
vase
scissors
teddy bear
hair drier
toothbrush
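
The 80 labels above are order-sensitive: the network outputs only an integer class id, and the viewer recovers the name by line position (see self.classes[class_ids[i]] in the detection code below). A minimal sketch of that lookup; the index is just an example:

    # Load the COCO labels once; a YOLO detection carries only a class id,
    # so the label is recovered by its line position in coco.names.
    with open("coco.names") as f:
        classes = [line.strip() for line in f if line.strip()]

    class_id = 16              # example only: line 17 of the file
    print(classes[class_id])   # -> "dog"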


@@ -3,11 +3,12 @@ import cv2
import numpy as np
from PyQt5.QtWidgets import (QApplication, QMainWindow, QLabel, QPushButton,
QVBoxLayout, QWidget, QComboBox, QMessageBox,
- QProgressBar, QDialog)
QProgressBar, QDialog, QFileDialog, QCheckBox, QStatusBar)
from PyQt5.QtGui import QImage, QPixmap
- from PyQt5.QtCore import QTimer, Qt
from PyQt5.QtCore import QTimer, Qt, QThread, pyqtSignal
import os
import urllib.request
import time
class DownloadProgressBar(QDialog):
def __init__(self, parent=None):
@@ -18,7 +19,7 @@ class DownloadProgressBar(QDialog):
layout = QVBoxLayout()
self.label = QLabel("Downloading...")
self.label = QLabel("Downloading YOLO files...")
layout.addWidget(self.label)
self.progress = QProgressBar()
@@ -26,101 +27,218 @@ class DownloadProgressBar(QDialog):
self.progress.setMaximum(100)
layout.addWidget(self.progress)
self.cancel_button = QPushButton("Cancel")
self.cancel_button.clicked.connect(self.reject)
layout.addWidget(self.cancel_button)
self.setLayout(layout)
def update_progress(self, current, total):
percentage = int((current / total) * 100)
self.progress.setValue(percentage)
QApplication.processEvents()
- def set_file_label(self, filename):
def update_label(self, filename):
self.label.setText(f"Downloading {filename}...")
QApplication.processEvents()
class DownloadProgressHandler:
def __init__(self, progress_dialog):
self.progress_dialog = progress_dialog
self.current_size = 0
self.total_size = 0
self.last_update = 0
def handle_progress(self, count, block_size, total_size):
self.total_size = total_size
- self.current_size += block_size
self.current_size = count * block_size
current_time = time.time()
if current_time - self.last_update > 0.1:
self.progress_dialog.update_progress(self.current_size, self.total_size)
self.last_update = current_time
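
handle_progress follows urllib's reporthook contract: urlretrieve calls the hook as hook(block_count, block_size, total_size), which is why the corrected line computes count * block_size rather than accumulating block_size. Note that total_size can be -1 when the server omits Content-Length, so a robust dialog should guard the percentage division (update_progress above does not). A standalone sketch, with a placeholder URL:

    import urllib.request

    def reporthook(block_count, block_size, total_size):
        # Called by urlretrieve after each block is written to disk.
        downloaded = block_count * block_size
        if total_size > 0:  # total_size is -1 without a Content-Length header
            print(f"\r{min(100, downloaded * 100 // total_size)}%", end="")

    # Placeholder URL for illustration only.
    urllib.request.urlretrieve("https://example.com/file.bin", "file.bin", reporthook)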
class DetectionThread(QThread):
detection_done = pyqtSignal(np.ndarray)
def __init__(self, net, classes, output_layers, frame):
super().__init__()
self.net = net
self.classes = classes
self.output_layers = output_layers
self.frame = frame.copy()
self.running = True
def run(self):
if not self.running or self.net is None or self.classes is None:
self.detection_done.emit(self.frame)
return
height, width = self.frame.shape[:2]
blob = cv2.dnn.blobFromImage(self.frame, 1/255.0, (416, 416), swapRB=True, crop=False)
self.net.setInput(blob)
outs = self.net.forward(self.output_layers)
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.5:
box = detection[0:4] * np.array([width, height, width, height])
(center_x, center_y, box_width, box_height) = box.astype("int")
x = int(center_x - (box_width / 2))
y = int(center_y - (box_height / 2))
boxes.append([x, y, int(box_width), int(box_height)])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
if len(indexes) > 0:
for i in indexes.flatten():
(x, y, w, h) = boxes[i]
label = str(self.classes[class_ids[i]])
confidence = confidences[i]
color = (0, 255, 0)
cv2.rectangle(self.frame, (x, y), (x + w, y + h), color, 2)
cv2.putText(self.frame, f"{label}: {confidence:.2f}",
(x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
self.detection_done.emit(self.frame)
def stop(self):
self.running = False
self.wait()
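
For reference, each detection row that run() iterates over is an 85-element vector: four normalized box values (center x, center y, width, height), one objectness score, then 80 per-class scores, hence scores = detection[5:] and the rescale by frame size. A worked decode of a single synthetic row:

    import numpy as np

    detection = np.zeros(85)                # synthetic row: 4 box + 1 objectness + 80 classes
    detection[0:4] = [0.5, 0.5, 0.2, 0.4]   # normalized cx, cy, w, h
    detection[5 + 16] = 0.9                 # strong score for class 16 ("dog")

    width, height = 1280, 720
    scores = detection[5:]
    class_id = int(np.argmax(scores))
    cx, cy, bw, bh = detection[0:4] * np.array([width, height, width, height])
    x, y = int(cx - bw / 2), int(cy - bh / 2)
    print(class_id, (x, y, int(bw), int(bh)))  # 16 (512, 216, 256, 288)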
class CameraApp(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("Object Detection Camera Viewer")
self.setGeometry(100, 100, 800, 600)
self.setWindowTitle("Advanced Object Detection Camera Viewer")
self.setGeometry(100, 100, 1000, 800)
# Initialize attributes first
self.camera_index = 0
self.cap = None
self.timer = QTimer()
- self.timer.timeout.connect(self.update_frame)
self.available_cameras = []
# Initialize object detection
self.detection_thread = None
self.net = None
self.classes = None
self.output_layers = None
- self.load_yolo()
self.detection_enabled = True
self.model_loaded = False
self.fps = 0
self.frame_count = 0
self.last_fps_update = time.time()
# Initialize UI
self.init_ui()
# Load YOLO after UI is ready
QTimer.singleShot(100, self.load_yolo)
def init_ui(self):
self.central_widget = QWidget()
self.setCentralWidget(self.central_widget)
layout = QVBoxLayout()
# Video display
self.video_label = QLabel(self)
self.video_label.setAlignment(Qt.AlignCenter)
self.video_label.setMinimumSize(640, 480)
layout.addWidget(self.video_label)
# Camera controls
self.camera_select = QComboBox(self)
- self.detect_cameras()
layout.addWidget(self.camera_select)
self.camera_select.currentIndexChanged.connect(self.change_camera)
# Detection controls
self.detection_checkbox = QCheckBox("Enable Object Detection", self)
self.detection_checkbox.setChecked(True)
self.detection_checkbox.stateChanged.connect(self.toggle_detection)
layout.addWidget(self.detection_checkbox)
# Button controls
button_layout = QVBoxLayout()
self.start_button = QPushButton("Start Camera", self)
self.start_button.clicked.connect(self.start_camera)
- layout.addWidget(self.start_button)
button_layout.addWidget(self.start_button)
self.stop_button = QPushButton("Stop Camera", self)
self.stop_button.clicked.connect(self.stop_camera)
- layout.addWidget(self.stop_button)
button_layout.addWidget(self.stop_button)
self.snapshot_button = QPushButton("Take Snapshot", self)
self.snapshot_button.clicked.connect(self.take_snapshot)
button_layout.addWidget(self.snapshot_button)
layout.addLayout(button_layout)
# Status bar
self.status_bar = QStatusBar()
self.setStatusBar(self.status_bar)
self.status_label = QLabel("Initializing...")
self.status_bar.addPermanentWidget(self.status_label)
self.central_widget.setLayout(layout)
# Set up timer after UI is ready
self.timer.timeout.connect(self.update_frame)
# Detect cameras after UI is ready
QTimer.singleShot(100, self.detect_cameras)
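
Deferring load_yolo and detect_cameras through QTimer.singleShot lets the window paint before the heavy work starts: the callable only runs once the event loop is spinning. A minimal demonstration of the pattern, with a hypothetical slow_init standing in for the real methods:

    import sys
    from PyQt5.QtCore import QTimer
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)

    def slow_init():
        # By the time this runs, exec_() is active and the UI can already repaint.
        print("heavy setup running")
        app.quit()

    QTimer.singleShot(100, slow_init)  # fire once, ~100 ms after the loop starts
    sys.exit(app.exec_())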
def toggle_detection(self, state):
self.detection_enabled = state == Qt.Checked
self.status_label.setText(f"Detection {'enabled' if self.detection_enabled else 'disabled'}")
def detect_cameras(self):
self.camera_select.clear()
self.available_cameras = []
- # Try to get the list of available cameras using DirectShow backend
- for i in range(10): # Check first 10 indexes
- cap = cv2.VideoCapture(i, cv2.CAP_DSHOW)
# Check for available cameras (try both backends)
backends = [cv2.CAP_DSHOW, cv2.CAP_V4L2, cv2.CAP_ANY]
max_cameras_to_check = 10
for backend in backends:
for i in range(max_cameras_to_check):
try:
cap = cv2.VideoCapture(i, backend)
if cap.isOpened():
- # Get camera name
- cap.set(cv2.CAP_PROP_SETTINGS, 1) # This might show camera properties dialog
- name = f"Camera {i}"
# Get camera properties
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS)
- # Try to get camera resolution to verify it's working
- width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
- height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
- if width and height:
- name = f"{name} ({int(width)}x{int(height)})"
- self.available_cameras.append(i)
- self.camera_select.addItem(name, i)
name = f"Camera {i} ({width}x{height})"
if fps > 0:
name += f" @ {fps:.1f}FPS"
self.available_cameras.append((i, backend))
self.camera_select.addItem(name, (i, backend))
cap.release()
break # Only add once per camera index
cap.release()
except Exception as e:
# Ignore errors for non-existent cameras
pass
if len(self.available_cameras) == 0:
self.status_label.setText("No cameras detected")
QMessageBox.warning(self, "Warning", "No cameras detected!")
print("Error: No available cameras detected.")
else:
print(f"Detected {len(self.available_cameras)} cameras")
self.status_label.setText(f"Found {len(self.available_cameras)} cameras")
def change_camera(self, index):
- if index >= 0: # Only change if a valid camera is selected
- self.camera_index = self.camera_select.itemData(index)
if index >= 0 and index < len(self.available_cameras):
self.camera_index, backend = self.camera_select.itemData(index)
if self.cap is not None and self.cap.isOpened():
self.stop_camera()
self.start_camera()
@@ -129,24 +247,32 @@ class CameraApp(QMainWindow):
if self.cap is not None:
self.stop_camera()
- try:
- self.cap = cv2.VideoCapture(self.camera_index, cv2.CAP_DSHOW)
- if not self.cap.isOpened():
- QMessageBox.warning(self, "Error", f"Cannot open camera {self.camera_index}")
- print(f"Error: Cannot open camera {self.camera_index}")
if not self.available_cameras:
QMessageBox.warning(self, "Error", "No cameras available")
return
- # Set camera properties for better performance
try:
current_index = max(0, self.camera_select.currentIndex())
self.camera_index, backend = self.camera_select.itemData(current_index)
self.cap = cv2.VideoCapture(self.camera_index, backend)
if not self.cap.isOpened():
QMessageBox.warning(self, "Error", f"Cannot open camera {self.camera_index}")
self.status_label.setText(f"Failed to open camera {self.camera_index}")
return
# Set camera properties
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
- self.cap.set(cv2.CAP_PROP_FPS, 30)
self.cap.set(cv2.CAP_PROP_FPS, 60)
self.cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)
self.timer.start(30)
print(f"Started camera {self.camera_index}")
self.status_label.setText(f"Camera {self.camera_index} started")
except Exception as e:
QMessageBox.critical(self, "Error", f"Error starting camera: {str(e)}")
print(f"Error starting camera: {str(e)}")
self.status_label.setText(f"Error: {str(e)}")
def stop_camera(self):
if self.cap:
@@ -154,45 +280,92 @@ class CameraApp(QMainWindow):
self.cap.release()
self.cap = None
self.video_label.clear()
print("Camera stopped")
self.status_label.setText("Camera stopped")
if self.detection_thread:
self.detection_thread.stop()
self.detection_thread = None
def load_yolo(self):
- # Download YOLO files if they don't exist
- weights_path = "yolov3.weights"
- config_path = "yolov3.cfg"
weights_url = "https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v4_pre/yolov4-tiny.weights"
config_url = "https://raw.githubusercontent.com/AlexeyAB/darknet/master/cfg/yolov4-tiny.cfg"
classes_url = "https://raw.githubusercontent.com/AlexeyAB/darknet/master/data/coco.names"
weights_path = "yolov4-tiny.weights"
config_path = "yolov4-tiny.cfg"
classes_path = "coco.names"
- if not all(os.path.exists(f) for f in [weights_path, config_path, classes_path]):
- QMessageBox.information(self, "Download", "Downloading YOLO model files. This may take a moment...")
- self.download_yolo_files()
files_valid = True
for path, min_size in [(weights_path, 10000000), (config_path, 10000), (classes_path, 1000)]:
if not os.path.exists(path) or os.path.getsize(path) < min_size:
files_valid = False
break
if not files_valid:
reply = QMessageBox.question(
self, "Download Files",
"YOLO model files need to be downloaded (~25MB). Continue?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.No:
self.status_label.setText("Object detection disabled - model not loaded")
return
self.download_yolo_files(weights_url, config_url, classes_url)
try:
- self.net = cv2.dnn.readNet(weights_path, config_path)
self.net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
if cv2.cuda.getCudaEnabledDeviceCount() > 0:
self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
self.status_label.setText("Using CUDA acceleration")
else:
self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
self.status_label.setText("Using CPU (no CUDA available)")
with open(classes_path, "r") as f:
self.classes = [line.strip() for line in f.readlines()]
layer_names = self.net.getLayerNames()
self.output_layers = [layer_names[i - 1] for i in self.net.getUnconnectedOutLayers()]
print("YOLO model loaded successfully")
self.model_loaded = True
self.status_label.setText("YOLOv4 model loaded successfully")
except Exception as e:
print(f"Error loading YOLO model: {str(e)}")
self.model_loaded = False
self.status_label.setText(f"Error loading model: {str(e)}")
QMessageBox.warning(self, "Error", "Failed to load object detection model")
- def download_yolo_files(self):
def download_yolo_files(self, weights_url, config_url, classes_url):
files = {
"yolov3.weights": "https://pjreddie.com/media/files/yolov3.weights",
"yolov3.cfg": "https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg",
"coco.names": "https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names"
"yolov4-tiny.weights": weights_url,
"yolov4-tiny.cfg": config_url,
"coco.names": classes_url
}
progress_dialog = DownloadProgressBar(self)
progress_dialog.show()
try:
for file_name, url in files.items():
- if not os.path.exists(file_name):
if os.path.exists(file_name):
try:
if file_name == "yolov4-tiny.weights" and os.path.getsize(file_name) < 10000000:
raise ValueError("Incomplete weights file")
elif file_name == "yolov4-tiny.cfg" and os.path.getsize(file_name) < 10000:
raise ValueError("Incomplete config file")
elif file_name == "coco.names" and os.path.getsize(file_name) < 1000:
raise ValueError("Incomplete names file")
continue
except Exception as e:
print(f"Existing file {file_name} is invalid: {str(e)}")
print(f"Downloading {file_name}...")
- progress_dialog.set_file_label(file_name)
progress_dialog.update_label(file_name)
try:
progress_handler = DownloadProgressHandler(progress_dialog)
urllib.request.urlretrieve(
@@ -204,91 +377,87 @@ class CameraApp(QMainWindow):
except Exception as e:
print(f"Error downloading {file_name}: {str(e)}")
QMessageBox.critical(self, "Error", f"Failed to download {file_name}")
progress_dialog.close()
return
- def detect_objects(self, frame):
- if self.net is None or self.classes is None:
- return frame
- height, width, _ = frame.shape
- blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
- self.net.setInput(blob)
- outs = self.net.forward(self.output_layers)
- # Showing information on the screen
- class_ids = []
- confidences = []
- boxes = []
- for out in outs:
- for detection in out:
- scores = detection[5:]
- class_id = np.argmax(scores)
- confidence = scores[class_id]
- if confidence > 0.5:
- # Object detected
- center_x = int(detection[0] * width)
- center_y = int(detection[1] * height)
- w = int(detection[2] * width)
- h = int(detection[3] * height)
- # Rectangle coordinates
- x = int(center_x - w / 2)
- y = int(center_y - h / 2)
- boxes.append([x, y, w, h])
- confidences.append(float(confidence))
- class_ids.append(class_id)
- indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
- for i in range(len(boxes)):
- if i in indexes:
- x, y, w, h = boxes[i]
- label = str(self.classes[class_ids[i]])
- confidence = confidences[i]
- color = (0, 255, 0) # Green
- cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
- cv2.putText(frame, f"{label} {confidence:.2f}", (x, y - 10),
- cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
- return frame
finally:
progress_dialog.close()
def update_frame(self):
if self.cap is None or not self.cap.isOpened():
return
ret, frame = self.cap.read()
- if ret:
- # Perform object detection
- frame = self.detect_objects(frame)
if not ret:
self.status_label.setText("Failed to capture frame")
return
- # Convert the frame from BGR to RGB
- rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
- h, w, ch = rgb_image.shape
self.frame_count += 1
current_time = time.time()
if current_time - self.last_fps_update >= 1.0:
self.fps = self.frame_count / (current_time - self.last_fps_update)
self.last_fps_update = current_time
self.frame_count = 0
self.status_label.setText(f"Running at {self.fps:.1f} FPS")
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if self.detection_enabled and self.model_loaded:
if self.detection_thread is None or not self.detection_thread.isRunning():
self.detection_thread = DetectionThread(self.net, self.classes, self.output_layers, rgb_frame)
self.detection_thread.detection_done.connect(self.display_frame)
self.detection_thread.start()
else:
self.display_frame(rgb_frame)
else:
self.display_frame(rgb_frame)
def display_frame(self, frame):
h, w, ch = frame.shape
bytes_per_line = ch * w
q_image = QImage(frame.data, w, h, bytes_per_line, QImage.Format_RGB888)
- # Convert the frame to QImage
- q_image = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888)
# Scale the image to fit the label while maintaining aspect ratio
scaled_pixmap = QPixmap.fromImage(q_image).scaled(
self.video_label.size(),
- aspectRatioMode=1 # Qt.KeepAspectRatio
Qt.KeepAspectRatio,
Qt.SmoothTransformation
)
self.video_label.setPixmap(scaled_pixmap)
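
A subtlety in display_frame: QImage(frame.data, ...) wraps the numpy buffer without copying, so the array must outlive the image. It works here because QPixmap.fromImage copies the pixels immediately; if the QImage itself were kept around, an explicit copy would be the safe pattern:

    import numpy as np
    from PyQt5.QtGui import QImage

    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # example RGB frame
    h, w, ch = frame.shape

    # .copy() detaches the QImage from frame's buffer, so it stays valid even
    # after the array is overwritten by the next captured frame.
    q_image = QImage(frame.data, w, h, ch * w, QImage.Format_RGB888).copy()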
def take_snapshot(self):
if self.cap is None or not self.cap.isOpened():
QMessageBox.warning(self, "Warning", "No active camera to take snapshot from")
return
ret, frame = self.cap.read()
if not ret:
QMessageBox.warning(self, "Warning", "Failed to capture frame")
return
options = QFileDialog.Options()
file_name, _ = QFileDialog.getSaveFileName(
self, "Save Snapshot", "",
"JPEG Images (*.jpg *.jpeg);;PNG Images (*.png)",
options=options)
if file_name:
# cv2.imwrite expects BGR channel order, which is what VideoCapture already
# returns; converting to RGB first would swap red and blue in the saved file.
success = cv2.imwrite(file_name, frame)
if success:
self.status_label.setText(f"Snapshot saved to {file_name}")
else:
print("Failed to get frame from camera")
QMessageBox.warning(self, "Error", "Failed to save snapshot")
def closeEvent(self, event):
self.stop_camera()
if self.detection_thread:
self.detection_thread.stop()
event.accept()
if __name__ == "__main__":
app = QApplication(sys.argv)
app.setStyle("Fusion")
window = CameraApp()
window.show()
sys.exit(app.exec_())

opencam/yolov4-tiny.cfg: new file (294 lines)

@@ -0,0 +1,294 @@
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=64
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.00261
burn_in=1000
max_batches = 2000200
policy=steps
steps=1600000,1800000
scales=.1,.1
#weights_reject_freq=1001
#ema_alpha=0.9998
#equidistant_point=1000
#num_sigmas_reject_badlabels=3
#badlabels_rejection_percentage=0.2
[convolutional]
batch_normalize=1
filters=32
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[route]
layers=-1
groups=2
group_id=1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
[route]
layers = -1,-2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -6,-1
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[route]
layers=-1
groups=2
group_id=1
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[route]
layers = -1,-2
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -6,-1
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[route]
layers=-1
groups=2
group_id=1
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[route]
layers = -1,-2
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -6,-1
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
##################################
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 3,4,5
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=80
num=6
jitter=.3
scale_x_y = 1.05
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
ignore_thresh = .7
truth_thresh = 1
random=0
resize=1.5
nms_kind=greedynms
beta_nms=0.6
#new_coords=1
#scale_x_y = 2.0
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 23
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 1,2,3
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=80
num=6
jitter=.3
scale_x_y = 1.05
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
ignore_thresh = .7
truth_thresh = 1
random=0
resize=1.5
nms_kind=greedynms
beta_nms=0.6
#new_coords=1
#scale_x_y = 2.0
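
The filters=255 on the convolution feeding each [yolo] layer above is derived from the class count rather than chosen freely: each of the 3 anchors selected by a mask predicts 4 box offsets, 1 objectness score, and 80 class scores. A quick check:

    classes = 80           # matches classes=80 in both [yolo] sections
    anchors_per_head = 3   # each mask (e.g. "3,4,5") picks 3 of the 6 anchors
    filters = anchors_per_head * (classes + 5)   # 5 = x, y, w, h, objectness
    print(filters)         # 255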

requirements.txt: new file (3 lines)

@@ -0,0 +1,3 @@
PyQt5>=5.15.0
opencv-python>=4.5.0
numpy>=1.19.0