"""Run a YOLOv5 ONNX model over every frame of a video.

Reads ``A1.mp4``, preprocesses each frame to the model's expected input,
runs inference with ``yolov5s.onnx``, and writes the (currently
unannotated) frames to ``A2.mp4`` while showing a live preview and
printing progress. Press ``q`` in the preview window to stop early.
"""
import cv2
import numpy as np
import onnxruntime as rt


def _preprocess(frame):
    """Convert a BGR frame to a (1, 3, 640, 640) float32 tensor in [0, 1]."""
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    resized = cv2.resize(rgb, (640, 640))
    normalized = resized.astype(np.float32) / 255.0
    chw = np.transpose(normalized, (2, 0, 1))  # HWC -> CHW, as the model expects
    return np.expand_dims(chw, axis=0)         # add batch dimension


def main():
    # Load the ONNX model once; the input name is loop-invariant, so look
    # it up here instead of on every frame.
    sess = rt.InferenceSession("yolov5s.onnx")
    input_name = sess.get_inputs()[0].name

    # Load the video and fail fast if it cannot be opened.
    cap = cv2.VideoCapture('A1.mp4')
    if not cap.isOpened():
        raise IOError("Could not open input video 'A1.mp4'")

    # Get video info. Some containers report 0 fps / 0 frame count, so both
    # values are treated as best-effort below.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0  # fall back when fps is reported as 0
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # Define the codec and create a VideoWriter object.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter('A2.mp4', fourcc, fps, (width, height))

    frame_count = 0
    try:
        while True:
            ret, frame = cap.read()
            if not ret:  # end of stream or read error
                break

            # Run the frame through the model.
            blob = _preprocess(frame)
            pred_onx = sess.run(None, {input_name: blob})
            # TODO: Postprocess `pred_onx` and draw bounding boxes on `frame`.
            # This depends on how you want to interpret the model's output.

            # Write the frame into the output file.
            out.write(frame)

            # Display the resulting frame; allow early exit with 'q'.
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            # Update and display progress. Guard against division by zero
            # when the container does not report a frame count.
            frame_count += 1
            if total_frames > 0:
                print(f'Progress: {frame_count / total_frames * 100:.2f}%')
            else:
                print(f'Processed {frame_count} frames')
    finally:
        # Release everything even if inference or I/O raised mid-stream.
        cap.release()
        out.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()