import sys
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel, QPushButton, QFileDialog
from PyQt5.QtGui import QPixmap, QImage
import cv2
from ultralytics import YOLO
class PedestrianVehicleDetectionUI(QWidget):
    """Qt widget that runs YOLO pedestrian/vehicle detection on images or videos.

    Loads weights from 'best.pt' and displays annotated frames on a QLabel.
    Labels 'person' and 'vehicle' are counted; assumes the trained model
    uses those class names — TODO confirm against the model's names map.
    """

    def __init__(self):
        super().__init__()
        self.initUI()
        # Load the trained YOLO weights once; reused for every frame.
        self.model = YOLO('best.pt')

    def initUI(self):
        """Build the window: an image display label plus a file-open button."""
        self.setWindowTitle('Pedestrian and Vehicle Detection System')
        self.layout = QVBoxLayout()
        self.label = QLabel(self)
        self.layout.addWidget(self.label)
        self.button = QPushButton('Open Image or Video', self)
        self.button.clicked.connect(self.open_file)
        self.layout.addWidget(self.button)
        self.setLayout(self.layout)

    def open_file(self):
        """Prompt for a media file and dispatch to image or video detection."""
        options = QFileDialog.Options()
        file_path, _ = QFileDialog.getOpenFileName(
            self, "Open File", "",
            "All Files (*);;MP4 Files (*.mp4);;JPEG Files (*.jpg);;PNG Files (*.png)",
            options=options)
        if file_path:
            # Case-insensitive so '.MP4' is also routed to the video path.
            if file_path.lower().endswith('.mp4'):
                self.detect_pedestrian_vehicle_video(file_path)
            else:
                self.detect_pedestrian_vehicle_image(file_path)

    def _annotate_frame(self, frame):
        """Run the model on one BGR frame and draw boxes/labels in place.

        Returns:
            (person_count, vehicle_count) for this frame.
        """
        results = self.model(frame)
        person_count = 0
        vehicle_count = 0
        # BUG FIX: ultralytics returns Results objects, not dicts —
        # result['bbox'] raises TypeError. Detections live in result.boxes
        # (.xyxy coordinates, .conf score, .cls class id -> model.names).
        for result in results:
            for box in result.boxes:
                x1, y1, x2, y2 = (int(v) for v in box.xyxy[0])
                confidence = float(box.conf[0])
                label = self.model.names[int(box.cls[0])]
                if label == 'person':
                    person_count += 1
                elif label == 'vehicle':
                    vehicle_count += 1
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(frame, f'{label} {confidence:.2f}', (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
        return person_count, vehicle_count

    def _show_frame(self, frame):
        """Convert a BGR ndarray to a QPixmap and display it on the label."""
        height, width, channel = frame.shape
        bytesPerLine = 3 * width
        # OpenCV frames are BGR; rgbSwapped() converts to RGB for Qt.
        qImg = QImage(frame.data, width, height, bytesPerLine,
                      QImage.Format_RGB888).rgbSwapped()
        self.label.setPixmap(QPixmap.fromImage(qImg))

    def detect_pedestrian_vehicle_image(self, file_path):
        """Detect on a single image file and display the annotated result."""
        frame = cv2.imread(file_path)
        if frame is None:
            # Unreadable/corrupt file: imread returns None — bail out
            # instead of crashing on frame.shape below.
            return
        person_count, vehicle_count = self._annotate_frame(frame)
        cv2.putText(frame, f'Persons: {person_count}', (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(frame, f'Vehicles: {vehicle_count}', (10, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        self._show_frame(frame)

    def detect_pedestrian_vehicle_video(self, file_path):
        """Detect frame-by-frame on a video file, updating the display live."""
        cap = cv2.VideoCapture(file_path)
        try:
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                self._annotate_frame(frame)
                self._show_frame(frame)
                # Keep the UI responsive while looping over frames.
                QApplication.processEvents()
        finally:
            # Release the capture even if detection raises mid-video.
            cap.release()
def main():
    """Create the Qt application, show the main window, enter the event loop."""
    app = QApplication(sys.argv)
    window = PedestrianVehicleDetectionUI()
    window.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
- 1.
- 2.
- 3.
- 4.
- 5.
- 6.
- 7.
- 8.
- 9.
- 10.
- 11.
- 12.
- 13.
- 14.
- 15.
- 16.
- 17.
- 18.
- 19.
- 20.
- 21.
- 22.
- 23.
- 24.
- 25.
- 26.
- 27.
- 28.
- 29.
- 30.
- 31.
- 32.
- 33.
- 34.
- 35.
- 36.
- 37.
- 38.
- 39.
- 40.
- 41.
- 42.
- 43.
- 44.
- 45.
- 46.
- 47.
- 48.
- 49.
- 50.
- 51.
- 52.
- 53.
- 54.
- 55.
- 56.
- 57.
- 58.
- 59.
- 60.
- 61.
- 62.
- 63.
- 64.
- 65.
- 66.
- 67.
- 68.
- 69.
- 70.
- 71.
- 72.
- 73.
- 74.
- 75.
- 76.
- 77.
- 78.
- 79.
- 80.
- 81.
- 82.
- 83.
- 84.
- 85.
- 86.
- 87.
- 88.
- 89.
- 90.
- 91.
- 92.
- 93.
- 94.
- 95.
- 96.
- 97.
- 98.
- 99.