本来是想测试一下,mmseg库中inference_segmentor
函数是否可以直接传入图像而不是路径,为此找了一个案例,如下:
import os
from PyQt5.QtCore import QLibraryInfo
# from PySide2.QtCore import QLibraryInfo
import cv2
# Point Qt at PyQt5's own platform-plugin directory BEFORE any Qt window is
# created. cv2's wheel ships its own Qt plugins (see the error path
# .../site-packages/cv2/qt/plugins), which conflict with PyQt5 and otherwise
# trigger the "Could not load the Qt platform plugin 'xcb'" crash.
os.environ["QT_QPA_PLATFORM_PLUGIN_PATH"] = QLibraryInfo.location(
QLibraryInfo.PluginsPath
)
from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
def main(config_file='/home/xilm/mmlab/mmsegmentation-master/configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py',
         checkpoint_file='/home/xilm/Downloads/fcn_d6_r101-d16_769x769_80k_cityscapes_20210306_120016-e33adc4f.pth',
         img='/home/xilm/Cityscapes/sequence/leftImg8bit/demoVideo/stuttgart_00/stuttgart_00_000000_000001_leftImg8bit.png',
         device='cuda:0'):
    """Run single-image semantic segmentation, passing the image as a *path*.

    Args:
        config_file: Path to the mmseg model config. NOTE(review): the
            author changed the config's ``test_cfg`` mode to ``'whole'``
            to simplify debugging.
        checkpoint_file: Path to the trained FCN checkpoint.
        img: Path to the input image (a string, not an array — this is
            the baseline before testing array input).
        device: Device string for model placement, e.g. ``'cuda:0'``.
    """
    # Build the segmentor from the config file and the trained weights.
    model = init_segmentor(config_file, checkpoint_file, device)
    # Inference on a single image given as a file path.
    result = inference_segmentor(model, img)
    # result is a per-image list; print the predicted mask's shape.
    print(result[0].shape)
    # Overlay the prediction on the input image and display it.
    show_result_pyplot(model, img, result)


if __name__ == '__main__':
    main()
这里在复现的时候遇到了一个问题:
QObject::moveToThread: Current thread (0x60d9e60) is not the object's thread (0x65edc50).
Cannot move to target thread (0x60d9e60)
qt.qpa.plugin: Could not load the Qt platform plugin "xcb" in "/home/xilm/anaconda3/lib/python3.9/site-packages/cv2/qt/plugins" even though it was found.
This application failed to start because no Qt platform plugin could be initialized. Reinstalling the application may fix this problem.
Available platform plugins are: xcb, eglfs, linuxfb, minimal, minimalegl, offscreen, vnc, wayland-egl, wayland, wayland-xcomposite-egl, wayland-xcomposite-glx, webgl.
这是一个老问题了,解决方法可以参考相关博客。
接下来把输入从图像路径改为用 cv2.imread
读取得到的图像数组,再次测试:
import os
from PyQt5.QtCore import QLibraryInfo
# from PySide2.QtCore import QLibraryInfo
import cv2
# Point Qt at PyQt5's own platform-plugin directory BEFORE any Qt window is
# created. cv2's wheel ships its own Qt plugins (see the error path
# .../site-packages/cv2/qt/plugins), which conflict with PyQt5 and otherwise
# trigger the "Could not load the Qt platform plugin 'xcb'" crash.
os.environ["QT_QPA_PLATFORM_PLUGIN_PATH"] = QLibraryInfo.location(
QLibraryInfo.PluginsPath
)
from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
def main(config_file='/home/xilm/mmlab/mmsegmentation-master/configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py',
         checkpoint_file='/home/xilm/Downloads/fcn_d6_r101-d16_769x769_80k_cityscapes_20210306_120016-e33adc4f.pth',
         img_path='/home/xilm/Cityscapes/sequence/leftImg8bit/demoVideo/stuttgart_00/stuttgart_00_000000_000001_leftImg8bit.png',
         device='cuda:0'):
    """Run single-image semantic segmentation, passing the image as an *array*.

    This variant reads the image with ``cv2.imread`` first, demonstrating
    that ``inference_segmentor`` accepts an in-memory image (BGR ndarray),
    not just a path.

    Args:
        config_file: Path to the mmseg model config. NOTE(review): the
            author changed the config's ``test_cfg`` mode to ``'whole'``
            to simplify debugging.
        checkpoint_file: Path to the trained FCN checkpoint.
        img_path: Path of the image to load with ``cv2.imread``.
        device: Device string for model placement, e.g. ``'cuda:0'``.

    Raises:
        FileNotFoundError: If ``cv2.imread`` cannot read ``img_path``
            (it returns ``None`` silently rather than raising).
    """
    img = cv2.imread(img_path)
    # cv2.imread does not raise on failure; fail loudly here instead of
    # passing None into the model and getting an opaque downstream error.
    if img is None:
        raise FileNotFoundError(f'Failed to read image: {img_path}')
    # Build the segmentor from the config file and the trained weights.
    model = init_segmentor(config_file, checkpoint_file, device)
    # Inference on a single image given as an ndarray.
    result = inference_segmentor(model, img)
    # result is a per-image list; print the predicted mask's shape.
    print(result[0].shape)
    # Overlay the prediction on the input image and display it.
    show_result_pyplot(model, img, result)


if __name__ == '__main__':
    main()
说明
inference_segmentor
函数是可以直接传入图像的。