说明:刷机后 jetpack=4.6.4 cuda=10.2.300 cudnn=8.2.1 tensorrt=8.2.1
一、卸载jetson自带的opencv
sudo apt-get purge libopencv*
查看是否删除干净
import cv2
cv2.__version__
如果报namespace错误,到系统自带python3.6.9 的 site-packages 目录下删除残留的cv2空文件夹。
二、编译安装cuda版本opencv
终端执行
wget -O opencv.zip https://github.com/opencv/opencv/archive/4.5.0.zip
wget -O opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/4.5.0.zip
unzip opencv.zip
unzip opencv_contrib.zip
mkdir -p build && cd build
cmake -DOPENCV_GENERATE_PKGCONFIG=ON -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-10.2/ -DWITH_PYTHON=ON -DBUILD_TIFF=ON -DWITH_CUDA=ON -DWITH_CUDNN=ON -DOPENCV_DNN_CUDA=ON -DWITH_CUBLAS=ON -DWITH_TBB=ON -DCMAKE_BUILD_TYPE=RELEASE -DCMAKE_INSTALL_PREFIX=/usr/local -DBUILD_OPENCV_PYTHON2=OFF -DCUDA_ARCH_BIN=7.2 -DWITH_MFX=ON -DWITH_OPENGL=ON -DOPENCV_CUDA_ARCH_PTX="" -DINSTALL_PYTHON_EXAMPLES=ON -DINSTALL_C_EXAMPLES=ON -DBUILD_JAVA=OFF -DWITH_NVCUVID=ON -DOPENCV_EXTRA_MODULES_PATH=../opencv_contrib-4.5.0/modules ../opencv-4.5.0
(注意:上面下载的是 4.5.0 版本源码,路径必须写 opencv-4.5.0 / opencv_contrib-4.5.0,两者版本要一致;重复的 -DOPENCV_DNN_CUDA、-DWITH_OPENGL 参数已去重)
make -j8
sudo make install  # 安装到 /usr/local 需要 root 权限
三、添加环境变量
sudo gedit /etc/ld.so.conf.d/opencv.conf
加入:/usr/local/lib
sudo ldconfig # 使配置生效
在/etc/bash.bashrc
中添加pkgconfig路径:
sudo gedit /etc/bash.bashrc
添加如下内容
PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/lib/pkgconfig
export PKG_CONFIG_PATH
配置生效
source /etc/bash.bashrc # 使配置生效
sudo updatedb
jtop查看是否编译成功
四、使用opencv硬解码视频
代码:test.py
import sys
import argparse
import cv2
import time
from ipdb import set_trace
# Name of the single OpenCV display window shared by open_window() and read_cam().
WINDOW_NAME = 'CameraDemo'
def parse_args():
    """Build and evaluate the command-line interface for the demo.

    Returns:
        argparse.Namespace with the source selection flags (use_file,
        use_rtsp, use_usb), their companion options (file_path, rtsp_uri,
        rtsp_latency, video_dev) and the display size (image_width,
        image_height).
    """
    parser = argparse.ArgumentParser(
        description='Capture and display live camera video on Jetson TX2/TX1')
    # Local video file input.
    parser.add_argument('--file', dest='use_file', action='store_true',
                        help='use video file (remember to also set --path)')
    parser.add_argument('--path', dest='file_path', type=str, default=None,
                        help='file URI, e.g. test.mp4')
    # RTSP network camera input.
    parser.add_argument('--rtsp', dest='use_rtsp', action='store_true',
                        help='use IP CAM (remember to also set --uri)')
    parser.add_argument('--uri', dest='rtsp_uri', type=str, default=None,
                        help='RTSP URI, e.g. rtsp://192.168.1.64:554')
    parser.add_argument('--latency', dest='rtsp_latency', type=int, default=200,
                        help='latency in ms for RTSP [200]')
    # USB webcam input.
    parser.add_argument('--usb', dest='use_usb', action='store_true',
                        help='use USB webcam (remember to also set --vid)')
    parser.add_argument('--vid', dest='video_dev', type=int, default=1,
                        help='device # of USB webcam (/dev/video?) [1]')
    # Output window / capture resolution.
    parser.add_argument('--width', dest='image_width', type=int, default=1920,
                        help='image width [1920]')
    parser.add_argument('--height', dest='image_height', type=int, default=1080,
                        help='image height [1080]')
    return parser.parse_args()
# H.264-in-MP4 input.
def open_local_file(path):
    """Open an H.264 MP4 file through a GStreamer pipeline using the
    Jetson hardware decoder (nvv4l2decoder) and hand BGR frames to OpenCV.
    """
    pipeline = (
        'filesrc location={} ! qtdemux ! h264parse ! nvv4l2decoder ! '
        'nvvidconv ! video/x-raw, format=BGRx ! videoconvert ! '
        'video/x-raw, format=BGR ! appsink '
    ).format(path)
    return cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
# MPEG-4 Part 2 in MP4 — alternative decode pipeline (uncomment to use
# instead of the H.264 version above; note the body must be indented):
# def open_local_file(path):
#     gst_str = ("filesrc location={} ! qtdemux ! mpeg4videoparse ! nvv4l2decoder ! nvvidconv ! video/x-raw, format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink").format(path)
#     return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_rtsp(uri, width, height, latency):
    """Open an RTSP H.264 stream, decoding on the hardware OMX decoder."""
    source = 'rtspsrc location={} latency={}'.format(uri, latency)
    caps = ('video/x-raw, width=(int){}, height=(int){}, '
            'format=(string)BGRx').format(width, height)
    pipeline = (source
                + ' ! rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! '
                + caps
                + ' ! videoconvert ! appsink')
    return cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
def open_cam_usb(dev, width, height):
    """Open a USB (V4L2) webcam at the requested resolution.

    Width/height must be set via caps here; a plain cv2.VideoCapture(dev)
    would use the camera default.
    """
    # NOTE(review): the caps request RGB while OpenCV convention is BGR —
    # confirm channel order on real hardware.
    pipeline = (
        f'v4l2src device=/dev/video{dev} ! '
        f'video/x-raw, width=(int){width}, height=(int){height}, '
        'format=(string)RGB ! '
        'videoconvert ! appsink'
    )
    return cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
def open_cam_onboard(width, height):
    """Open the Jetson onboard CSI camera and scale frames to width x height.

    Bug fix: the original pipeline used 'nvcamerasrc' with I420 caps, an
    element that no longer exists on the L4T release shipped with
    JetPack 4.x (this guide targets JetPack 4.6.4, see the file header).
    Its replacement is 'nvarguscamerasrc', which outputs NV12 in NVMM
    memory. Capture resolution/framerate stay hard-coded as before.
    On versions of L4T prior to 28.1, add 'flip-method=2' to nvvidconv.
    """
    gst_str = ('nvarguscamerasrc ! '
               'video/x-raw(memory:NVMM), '
               'width=(int)2592, height=(int)1458, '
               'format=(string)NV12, framerate=(fraction)30/1 ! '
               'nvvidconv ! '
               'video/x-raw, width=(int){}, height=(int){}, '
               'format=(string)BGRx ! '
               'videoconvert ! appsink').format(width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_window(width, height):
    """Create the display window, title it, and place it at the top-left
    corner with the given size."""
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
    cv2.setWindowTitle(WINDOW_NAME, 'Camera Demo for Jetson TX2/TX1')
    cv2.moveWindow(WINDOW_NAME, 0, 0)
    cv2.resizeWindow(WINDOW_NAME, width, height)
def read_cam(cap):
    """Display frames from `cap` until the user quits, printing the
    measured FPS every 100 frames.

    Keys: Esc quits, H toggles the help overlay, F toggles fullscreen.

    Bug fix: the original did `_, img = cap.read()` and ignored the status
    flag, so at end-of-file (or a stalled pipeline) `img` was None and
    cv2.putText crashed. We now break out of the loop on a failed read.
    """
    start = time.time()
    frame_count = 0
    show_help = True
    full_scrn = False
    help_text = '"Esc" to Quit, "H" for Help, "F" to Toggle Fullscreen'
    font = cv2.FONT_HERSHEY_PLAIN
    while True:
        # Stop when the user closes the window via the window manager.
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            break
        ret, img = cap.read()  # grab the next image frame
        if not ret or img is None:
            break  # end of stream or read failure
        # Report throughput once per 100 frames.
        frame_count += 1
        if frame_count == 100:
            end = time.time()
            print("fps is : %f" % (100 / (end - start)))
            start = time.time()
            frame_count = 0
        if show_help:
            # Draw the help text twice (dark outline + light fill) for
            # readability on any background.
            cv2.putText(img, help_text, (11, 20), font,
                        1.0, (32, 32, 32), 4, cv2.LINE_AA)
            cv2.putText(img, help_text, (10, 20), font,
                        1.0, (240, 240, 240), 1, cv2.LINE_AA)
        cv2.imshow(WINDOW_NAME, img)
        key = cv2.waitKey(10)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # toggle help message
            show_help = not show_help
        elif key == ord('F') or key == ord('f'):  # toggle fullscreen
            full_scrn = not full_scrn
            if full_scrn:
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_FULLSCREEN)
            else:
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_NORMAL)
def main():
    """Entry point: pick a video source from the CLI flags, open it, and
    run the display loop.

    Robustness fixes over the original:
      * --file without --path (and --rtsp without --uri) previously built a
        pipeline containing 'location=None'; we now exit with a clear error.
      * cap.release() / destroyAllWindows() now run even if read_cam raises.
    """
    args = parse_args()
    print('Called with args:')
    print(args)
    print('OpenCV version: {}'.format(cv2.__version__))
    if args.use_file:
        if args.file_path is None:
            sys.exit('--file requires --path')
        cap = open_local_file(args.file_path)
    elif args.use_rtsp:
        if args.rtsp_uri is None:
            sys.exit('--rtsp requires --uri')
        cap = open_cam_rtsp(args.rtsp_uri,
                            args.image_width,
                            args.image_height,
                            args.rtsp_latency)
    elif args.use_usb:
        cap = open_cam_usb(args.video_dev,
                           args.image_width,
                           args.image_height)
    else:  # by default, use the Jetson onboard camera
        cap = open_cam_onboard(args.image_width,
                               args.image_height)
    if not cap.isOpened():
        sys.exit('Failed to open camera!')
    open_window(args.image_width, args.image_height)
    try:
        read_cam(cap)
    finally:
        cap.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
运行
python3 test.py --file --path "/home/nvidia/liux/project/tensorrt_test/sample.mp4"
注意:视频编码格式区分H264和mpeg-4,用到的gstreamer解码器不同,参考代码的不同函数
NVDEC变绿说明开始硬解码了
五、在虚环境中使用cuda版本的opencv
查看cuda版本opencv动态库位置
>>> import cv2
>>> print(cv2)
<module 'cv2' from '/home/nvidia/mambaforge/envs/yolov5/lib/python3.6/site-packages/cv2.cpython-36m-aarch64-linux-gnu.so'>
>>>
拷贝到虚环境
cp /usr/local/lib/python3.6/dist-packages/cv2/python-3.6/cv2.cpython-36m-aarch64-linux-gnu.so /home/nvidia/mambaforge/envs/yolov5/lib/python3.6/site-packages
至此激活虚环境就可以在虚环境下硬解码视频了