python多个摄像头_Jetson测试多个CSI摄像机(Python版本),jetson,摄像头,python

这篇博客展示了如何使用Python和OpenCV在NVIDIA Jetson Nano上同时读取并显示两个CSI摄像头的视频流。代码创建了两个线程分别处理每个摄像头的数据,并通过gstreamer_pipeline设置传感器模式和分辨率。当按下ESC键时,程序会停止并释放资源。
摘要由CSDN通过智能技术生成

# MIT License

# Copyright (c) 2019,2020 JetsonHacks

# See license

# A very simple code snippet

# Using two CSI cameras (such as the Raspberry Pi Version 2) connected to a

# NVIDIA Jetson Nano Developer Kit (Rev B01) using OpenCV

# Drivers for the camera and OpenCV are included in the base image in JetPack 4.3+

# This script will open a window and place the camera stream from each camera in a window

# arranged horizontally.

# The camera streams are each read in their own thread, as when done sequentially there

# is a noticeable lag

# For better performance, the next step would be to experiment with having the window display

# in a separate thread

import cv2

import threading

import numpy as np

# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera

# Flip the image by setting the flip_method (most common values: 0 and 2)

# display_width and display_height determine the size of each camera pane in the window on the screen

# Module-level handles for the two cameras.
# NOTE(review): start_cameras() assigns same-named *local* variables, so these
# globals are never rebound and stay None for the program's lifetime —
# presumably a leftover from a version that declared them `global`; confirm
# before relying on them from other modules.
left_camera = None

right_camera = None

class CSI_Camera:
    """Capture frames from one CSI camera on a background thread.

    Reading each camera in its own thread keeps the main display loop from
    blocking on camera I/O. Typical usage: open() with a GStreamer pipeline
    string, start() the reader thread, read() the latest frame repeatedly,
    then stop() and release().
    """

    def __init__(self):
        # OpenCV video capture element (None until open() succeeds)
        self.video_capture = None
        # The last captured image from the camera
        self.frame = None
        # Whether the most recent capture attempt succeeded
        self.grabbed = False
        # The thread where the video capture runs
        self.read_thread = None
        # Protects frame/grabbed, written by the reader thread and
        # read by the main thread
        self.read_lock = threading.Lock()
        # Loop flag for the reader thread
        self.running = False

    def open(self, gstreamer_pipeline_string):
        """Open the capture device described by the GStreamer pipeline.

        On failure, video_capture is left as None and diagnostics are
        printed; callers should check video_capture before use.
        """
        try:
            self.video_capture = cv2.VideoCapture(
                gstreamer_pipeline_string, cv2.CAP_GSTREAMER
            )
        except RuntimeError:
            self.video_capture = None
            print("Unable to open camera")
            print("Pipeline: " + gstreamer_pipeline_string)
            return
        # Grab the first frame to start the video capturing
        self.grabbed, self.frame = self.video_capture.read()

    def start(self):
        """Start the reader thread.

        Returns self (for chaining), or None if capturing is already running.
        Without an open capture device no thread is created.
        """
        if self.running:
            print('Video capturing is already running')
            return None
        # Create a thread to read the camera image
        if self.video_capture is not None:
            self.running = True
            self.read_thread = threading.Thread(target=self.updateCamera)
            self.read_thread.start()
        return self

    def stop(self):
        """Signal the reader thread to exit and wait for it to finish."""
        self.running = False
        # Guard: stop() may be called before start() ever created a thread
        # (the original crashed on None.join() in that case).
        if self.read_thread is not None:
            self.read_thread.join()
            self.read_thread = None

    def updateCamera(self):
        # This is the thread body: keep reading images from the camera
        # until running is cleared by stop()/release().
        while self.running:
            try:
                grabbed, frame = self.video_capture.read()
                with self.read_lock:
                    self.grabbed = grabbed
                    self.frame = frame
            except RuntimeError:
                print("Could not read image from camera")
                # Something bad happened: stop the loop so the thread exits
                # instead of spinning forever on a dead capture device
                # (resolves the original "FIX ME" here).
                self.running = False

    def read(self):
        """Return (grabbed, frame) for the most recent capture.

        Returns (False, None) when no frame has been captured yet, instead
        of crashing on None.copy() as the original did.
        """
        with self.read_lock:
            if self.frame is None:
                return False, None
            frame = self.frame.copy()
            grabbed = self.grabbed
        return grabbed, frame

    def release(self):
        """Release the capture device and join the reader thread."""
        # Tell the reader loop to stop before joining, so release() without
        # a prior stop() cannot block forever on a live thread.
        self.running = False
        if self.video_capture is not None:
            self.video_capture.release()
            self.video_capture = None
        # Now kill the thread
        if self.read_thread is not None:
            self.read_thread.join()

# On the Jetson Nano the CSI camera frame rate is set through the GStreamer pipeline caps.

# Here we directly select sensor_mode 3 (1280x720, 59.9999 fps)

def gstreamer_pipeline(
    sensor_id=0,
    sensor_mode=3,
    capture_width=1280,
    capture_height=720,
    display_width=1280,
    display_height=720,
    framerate=30,
    flip_method=0,
):
    """Build the GStreamer pipeline string for one CSI camera.

    The pipeline captures NV12 frames from nvarguscamerasrc at the given
    sensor mode and capture size, flips/scales them with nvvidconv to the
    display size, and converts to BGR for OpenCV's appsink.
    """
    # Camera source: raw NVMM frames at the capture resolution and rate.
    source = (
        f"nvarguscamerasrc sensor-id={sensor_id} sensor-mode={sensor_mode} ! "
        f"video/x-raw(memory:NVMM), "
        f"width=(int){capture_width}, height=(int){capture_height}, "
        f"format=(string)NV12, framerate=(fraction){framerate}/1 ! "
    )
    # Hardware conversion: flip and scale down to the display pane size.
    conversion = (
        f"nvvidconv flip-method={flip_method} ! "
        f"video/x-raw, width=(int){display_width}, "
        f"height=(int){display_height}, format=(string)BGRx ! "
    )
    # Final software conversion to BGR, delivered to OpenCV via appsink.
    sink = "videoconvert ! video/x-raw, format=(string)BGR ! appsink"
    return source + conversion + sink

def start_cameras():
    """Open both CSI sensors and display them side by side in one window.

    Each camera is read on its own background thread. The loop runs until
    the user closes the window or presses ESC, then both reader threads are
    stopped and the capture devices released.

    Raises SystemExit if either camera cannot be opened.
    """
    left_camera = CSI_Camera()
    left_camera.open(
        gstreamer_pipeline(
            sensor_id=0,
            sensor_mode=3,
            flip_method=0,
            display_height=540,
            display_width=960,
        )
    )
    left_camera.start()

    right_camera = CSI_Camera()
    right_camera.open(
        gstreamer_pipeline(
            sensor_id=1,
            sensor_mode=3,
            flip_method=0,
            display_height=540,
            display_width=960,
        )
    )
    right_camera.start()

    # Cameras did not open, or no camera attached. The original constructed
    # SystemExit(0) without raising it, so execution fell through into the
    # display loop and crashed; here we clean up and actually exit. Also
    # guard against video_capture being None (open()'s RuntimeError path)
    # before calling isOpened().
    if (
        left_camera.video_capture is None
        or not left_camera.video_capture.isOpened()
        or right_camera.video_capture is None
        or not right_camera.video_capture.isOpened()
    ):
        print("Unable to open any cameras")
        for camera in (left_camera, right_camera):
            if camera.read_thread is not None:
                camera.stop()
            camera.release()
        raise SystemExit(0)

    cv2.namedWindow("CSI Cameras", cv2.WINDOW_AUTOSIZE)
    # getWindowProperty goes negative once the user closes the window.
    while cv2.getWindowProperty("CSI Cameras", 0) >= 0:
        _, left_image = left_camera.read()
        _, right_image = right_camera.read()
        # Skip the frame if either reader has nothing yet; hstack on None
        # would crash.
        if left_image is None or right_image is None:
            continue
        # Arrange the two camera panes horizontally in a single image.
        camera_images = np.hstack((left_image, right_image))
        cv2.imshow("CSI Cameras", camera_images)
        # waitKey also pumps the HighGUI event loop (~30 ms per frame).
        keyCode = cv2.waitKey(30) & 0xFF
        # Stop the program on the ESC key
        if keyCode == 27:
            break

    left_camera.stop()
    left_camera.release()
    right_camera.stop()
    right_camera.release()
    cv2.destroyAllWindows()

# Script entry point: run the dual-camera display only when executed
# directly, not when imported as a module.
if __name__ == "__main__":
    start_cameras()

树莓派4B是一款功能强大的单板计算机,可以通过Python编程语言轻松地调用CSI摄像头。 在开始之前,我们需要确保树莓派4B的操作系统已经安装并正确配置。接下来,我们需要连接CSI摄像头到树莓派的摄像头接口上。 首先,我们需要通过树莓派的配置界面打开摄像头功能。在终端中输入以下命令: sudo raspi-config 然后选择"Interfacing Options",进入子菜单后选择“Camera”,并选择启用CSI摄像头。接下来,重新启动树莓派以使配置生效。 接下来,我们可以使用Python的picamera库来调用CSI摄像头。首先,我们需要确保已经安装了picamera库。在终端中使用以下命令安装: sudo apt-get update sudo apt-get install python3-picamera (注意:python-picamera 是Python 2版本的软件包;下面的示例代码基于Python 3,应安装python3-picamera) 一旦安装完成,我们可以通过编写Python代码来调用CSI摄像头。以下是一个简单的示例代码: ```python import picamera import time # 创建摄像头对象 camera = picamera.PiCamera() # 设置摄像头的分辨率 camera.resolution = (640, 480) # 开始预览摄像头画面 camera.start_preview() # 等待3秒钟 time.sleep(3) # 拍摄一张照片 camera.capture('image.jpg') # 停止预览 camera.stop_preview() # 关闭摄像头 camera.close() ``` 这个示例代码使用picamera库创建了一个摄像头对象,并设置了摄像头的分辨率为640x480像素。然后,它开始预览摄像头的画面,等待3秒钟后拍摄一张照片,并保存为image.jpg。最后,停止预览并关闭摄像头。 以上就是使用树莓派4B通过Python调用CSI摄像头的简单步骤。你可以根据自己的需求进行更复杂的摄像头操作和图像处理。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值