Mask-RCNN(二):将自己训练的 Mask-RCNN 模型用于物体实时检测
以下代码运行采用win10系统的电脑,编程语言python。
参考博客:
- https://blog.csdn.net/weixin_39235110/article/details/98585745
- https://blog.csdn.net/eereere/article/details/80178595
1.前期工作
已训练好的 Mask-RCNN 模型(.h5文件),训练方法见我的上一篇博客:https://blog.csdn.net/qfdzwly/article/details/107647003
2.代码及运行效果
import cv2
import numpy as np
from mrcnn.config import Config
class MouseConfig(Config):
    """Configuration for training on the toy dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "box"
    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 1
    # Number of classes (including background)
    # NOTE(review): comment kept from the balloon sample template; the
    # foreground class trained here is actually "box".
    NUM_CLASSES = 1 + 1 # Background + balloon
    # Number of training steps per epoch
    STEPS_PER_EPOCH = 100
    # Skip detections with < 90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9
def random_colors(N):
    """Return ``N`` deterministic color tuples, each component in [0, 255).

    Bug fix: the original called ``np.random.seed(1)``, reseeding NumPy's
    *global* RNG as a side effect on every call. A locally seeded
    ``RandomState(1)`` produces the exact same sequence of draws without
    touching global state.

    Args:
        N: number of colors to generate.

    Returns:
        List of ``N`` 3-tuples of floats in [0, 255).
    """
    rng = np.random.RandomState(1)  # fixed seed -> identical palette every call
    return [tuple(255 * rng.rand(3)) for _ in range(N)]
def apply_mask(image, mask, color, alpha=0.5):
    """Blend ``color`` into ``image`` wherever ``mask`` equals 1.

    Inside the mask each channel becomes
    ``(1 - alpha) * pixel + alpha * channel_color``; pixels outside the
    mask are left untouched. The image array is modified in place and
    also returned for convenience.
    """
    for channel_index in range(len(color)):
        channel_color = color[channel_index]
        original = image[:, :, channel_index]
        blended = original * (1 - alpha) + alpha * channel_color
        image[:, :, channel_index] = np.where(mask == 1, blended, original)
    return image
def display_instances(image, boxes, masks, ids, names, scores):
    """Draw detection results (mask overlay, bounding box, caption) on ``image``.

    Args:
        image: BGR frame of shape (H, W, 3) to annotate in place.
        boxes: (N, 4) array of [y1, x1, y2, x2] per instance.
        masks: (H, W, N) 0/1 masks, one slice per instance.
        ids: (N,) class-id array indexing into ``names``.
        names: list of class names; index 0 is background.
        scores: (N,) confidence scores, or None.

    Returns:
        The annotated image (same underlying array as the input).
    """
    n_instances = boxes.shape[0]
    if not n_instances:
        print('No instances to display')
    else:
        assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
    colors = random_colors(n_instances)
    for i, color in enumerate(colors):
        # An all-zero row marks an empty/padded detection slot -- skip it.
        if not np.any(boxes[i]):
            continue
        y1, x1, y2, x2 = boxes[i]
        image = apply_mask(image, masks[:, :, i], color)
        image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
        label = names[ids[i]]
        score = scores[i] if scores is not None else None
        # Bug fix: compare against None instead of truthiness -- a
        # legitimate 0.00 score previously dropped the numeric caption.
        caption = '{}{:.2f}'.format(label, score) if score is not None else label
        image = cv2.putText(
            image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2
        )
    return image
if __name__ == '__main__':
    import os
    import sys
    import time

    # Project root of the Mask_RCNN checkout; mrcnn must be importable from here.
    ROOT_DIR = os.path.abspath("C:\\Users\\LARA\\Desktop\\Mask_RCNN-master")
    sys.path.append(ROOT_DIR)
    from mrcnn import utils  # noqa: F401 -- kept for parity with the original
    import mrcnn.model as modellib
    sys.path.append(os.path.join(ROOT_DIR, "samples/coco/"))  # To find local version

    MODEL_DIR = os.path.join(ROOT_DIR, "logs")
    # NOTE: os.path.join with an absolute second argument returns it unchanged,
    # so ROOT_DIR is effectively ignored here.
    COCO_MODEL_PATH = os.path.join(ROOT_DIR, "C:\\Users\\LARA\\Desktop\\Mask_RCNN-master\\logs\\shapes20200729T2252\\mask_rcnn_shapes_0030.h5")
    print('COCO_MODEL_PATH: ', COCO_MODEL_PATH)
    if not os.path.exists(COCO_MODEL_PATH):
        # Bug fix: previously only printed a warning and then crashed later
        # inside load_weights; fail fast with a clear message instead.
        sys.exit('cannot find coco_model: ' + COCO_MODEL_PATH)

    class InferenceConfig(MouseConfig):
        # Single image at a time on one GPU for live inference.
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1

    config = InferenceConfig()
    config.display()

    model = modellib.MaskRCNN(
        mode="inference", model_dir=MODEL_DIR, config=config
    )
    # Load weights from our own training run (by_name matches layers by name).
    model.load_weights(COCO_MODEL_PATH, by_name=True)

    # NOTE(review): the model was trained with NUM_CLASSES = 2 (BG + box), so
    # only the first two entries can ever be predicted; the COCO tail is inert.
    class_names = ['BG', 'box', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
                   'bus', 'train', 'truck', 'boat', 'traffic light',
                   'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
                   'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
                   'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
                   'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
                   'kite', 'baseball bat', 'baseball glove', 'skateboard',
                   'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
                   'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                   'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
                   'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
                   'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
                   'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
                   'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
                   'teddy bear', 'hair drier', 'toothbrush']

    capture = cv2.VideoCapture(1)  # camera index 1; use 0 for the default webcam
    if not capture.isOpened():
        sys.exit('cannot open camera')
    # capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    # capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    try:
        while True:
            ret, frame = capture.read()
            if not ret:
                # Bug fix: a failed grab previously passed frame=None into
                # model.detect and crashed; leave the loop cleanly instead.
                print('failed to grab frame')
                break
            results = model.detect([frame], verbose=0)
            r = results[0]
            frame = display_instances(
                frame, r['rois'], r['masks'], r['class_ids'],
                class_names, r['scores']
            )
            cv2.imshow('camera1', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            print(time.time())
    finally:
        # Always release the camera and close windows, even on an exception.
        capture.release()
        cv2.destroyAllWindows()
代码运行效果如下,用30张图片训练的网络,效果并不是很好,可以识别并标定出来就是胜利了∩▂∩
但是实时性较差,我的电脑上只有大概每秒两帧的检测速度。
3.自己代码需进行的更改