After converting YOLOv8 (or an improved variant of it) to an RKNN model, you can evaluate the model's performance through the APIs provided by the official toolkit.
① Use rknn.eval_perf() to measure the model's inference time, down to per-layer granularity.
from rknn.api import RKNN

if __name__ == '__main__':
    rknn = RKNN()
    # Load the RKNN model with the load_rknn interface
    ret = rknn.load_rknn(path='./path/to/yolov8_RK3588.rknn')
    if ret != 0:
        print('Load RKNN model failed!')
        exit(ret)
    # Initialize the runtime environment with the init_runtime interface
    ret = rknn.init_runtime(target='rk3588', device_id='03d205b241a91ccc')
    if ret != 0:
        print('Init runtime environment failed!')
        exit(ret)
    print('done')
    # Run the performance evaluation with the eval_perf interface
    rknn.eval_perf()
    rknn.release()
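By default eval_perf reports the overall inference time; per the RKNN-Toolkit2 documentation, the per-layer breakdown requires initializing the runtime in performance-debug mode (worth verifying against your toolkit version):

    ret = rknn.init_runtime(target='rk3588', device_id='03d205b241a91ccc', perf_debug=True)  # enables per-layer timing in eval_perf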
② Use rknn.eval_memory() to measure the model's runtime memory usage. Note that memory evaluation runs on the actual device, so a target must be specified in init_runtime, together with eval_mem=True.
from rknn.api import RKNN

if __name__ == '__main__':
    rknn = RKNN()
    # Load the RKNN model with the load_rknn interface
    rknn.load_rknn(path='./path/to/yolov8_RK3588.rknn')
    # Initialize the runtime environment; eval_mem=True enables memory evaluation
    rknn.init_runtime(target='rk3588', device_id='03d205b241a91ccc', eval_mem=True)
    # Run the memory evaluation with the eval_memory interface
    rknn.eval_memory(
        is_print=True,  # whether to print the memory usage information
    )
    rknn.release()
③ I wrote my own script to compute mAP50 for the RKNN model on the dataset's validation split. The rough flow: run inference over all validation images on the RK3588, save the results as text files, copy those files into the yolov8 folder on the PC, compute the AP of each image's predictions against the corresponding labels file, and finally average over all images. Note that mAP is normally defined by computing AP over all objects (per class, across the whole dataset) and then averaging; my code instead computes one mAP per image from that image's predictions and then averages the per-image values, so the fewer objects each image contains, the smaller the resulting error. (The code is for reference only.)
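A minimal sketch of the averaging described above (approx_map50 is a hypothetical helper for illustration, not part of the scripts below):

def approx_map50(per_image_map50):
    # Mean of the per-image mAP50 values; the standard metric would instead
    # accumulate TP/FP per class across the whole dataset before computing AP
    return sum(per_image_map50) / max(len(per_image_map50), 1)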
# Inference code on the RK3588 side
import os
import cv2
from rknn.api import RKNN
import numpy as np

RKNN_MODEL = "./path/to/yolov8_RK3588.rknn"
IMG_FOLDER = "./datasets/images/"
RESULT_PATH = './datasets/results/'
# Size used to normalize the saved predictions (the letterboxed model input size)
img_width = 640
img_height = 640
CLASSES = ['Wear', 'Pitting', 'Miss', 'One-third miss']
OBJ_THRESH = 0.25
NMS_THRESH = 0.45
MODEL_SIZE = (640, 640)
color_palette = np.random.uniform(0, 255, size=(len(CLASSES), 3))
def sigmoid(x):
    # Kept from the reference code; not used below
    return 1 / (1 + np.exp(-x))

def letter_box(im, new_shape, pad_color=(0, 0, 0), info_need=False):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])

    # Compute padding
    ratio = r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=pad_color)  # add border

    if info_need is True:
        return im, ratio, (dw, dh)
    else:
        return im
def filter_boxes(boxes, box_confidences, box_class_probs):
    """Filter boxes with object threshold."""
    box_confidences = box_confidences.reshape(-1)
    class_max_score = np.max(box_class_probs, axis=-1)
    classes = np.argmax(box_class_probs, axis=-1)

    _class_pos = np.where(class_max_score * box_confidences >= OBJ_THRESH)
    scores = (class_max_score * box_confidences)[_class_pos]
    boxes = boxes[_class_pos]
    classes = classes[_class_pos]
    return boxes, classes, scores
def nms_boxes(boxes, scores):
    """Suppress non-maximal boxes.

    # Returns
        keep: ndarray, index of effective boxes.
    """
    x = boxes[:, 0]
    y = boxes[:, 1]
    w = boxes[:, 2] - boxes[:, 0]
    h = boxes[:, 3] - boxes[:, 1]

    areas = w * h
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)

        xx1 = np.maximum(x[i], x[order[1:]])
        yy1 = np.maximum(y[i], y[order[1:]])
        xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
        yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])

        w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
        h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
        inter = w1 * h1

        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        inds = np.where(ovr <= NMS_THRESH)[0]
        order = order[inds + 1]
    keep = np.array(keep)
    return keep
def softmax(x, axis=None):
    x = x - x.max(axis=axis, keepdims=True)
    y = np.exp(x)
    return y / y.sum(axis=axis, keepdims=True)

def dfl(position):
    # Distribution Focal Loss (DFL) decoding: softmax over each of the four
    # side-distance distributions, then take the expectation
    n, c, h, w = position.shape
    p_num = 4
    mc = c // p_num
    y = position.reshape(n, p_num, mc, h, w)
    y = softmax(y, 2)
    acc_matrix = np.array(range(mc), dtype=float).reshape(1, 1, mc, 1, 1)
    y = (y * acc_matrix).sum(2)
    return y
def box_process(position):
    grid_h, grid_w = position.shape[2:4]
    col, row = np.meshgrid(np.arange(0, grid_w), np.arange(0, grid_h))
    col = col.reshape(1, 1, grid_h, grid_w)
    row = row.reshape(1, 1, grid_h, grid_w)
    grid = np.concatenate((col, row), axis=1)
    stride = np.array([MODEL_SIZE[1] // grid_h, MODEL_SIZE[0] // grid_w]).reshape(1, 2, 1, 1)

    position = dfl(position)
    box_xy = grid + 0.5 - position[:, 0:2, :, :]
    box_xy2 = grid + 0.5 + position[:, 2:4, :, :]
    xyxy = np.concatenate((box_xy * stride, box_xy2 * stride), axis=1)
    return xyxy
def post_process(input_data):
    boxes, scores, classes_conf = [], [], []
    default_branch = 3
    pair_per_branch = len(input_data) // default_branch
    # The score_sum output (if exported) is ignored on the Python side
    for i in range(default_branch):
        boxes.append(box_process(input_data[pair_per_branch * i]))
        classes_conf.append(input_data[pair_per_branch * i + 1])
        scores.append(np.ones_like(input_data[pair_per_branch * i + 1][:, :1, :, :], dtype=np.float32))

    def sp_flatten(_in):
        ch = _in.shape[1]
        _in = _in.transpose(0, 2, 3, 1)
        return _in.reshape(-1, ch)

    boxes = [sp_flatten(_v) for _v in boxes]
    classes_conf = [sp_flatten(_v) for _v in classes_conf]
    scores = [sp_flatten(_v) for _v in scores]

    boxes = np.concatenate(boxes)
    classes_conf = np.concatenate(classes_conf)
    scores = np.concatenate(scores)

    # Filter according to the object-confidence threshold
    boxes, classes, scores = filter_boxes(boxes, scores, classes_conf)

    # Per-class NMS
    nboxes, nclasses, nscores = [], [], []
    for c in set(classes):
        inds = np.where(classes == c)
        b = boxes[inds]
        s = scores[inds]
        keep = nms_boxes(b, s)
        if len(keep) != 0:
            nboxes.append(b[keep])
            nclasses.append(classes[inds][keep])
            nscores.append(s[keep])

    if not nclasses and not nscores:
        return None, None, None

    boxes = np.concatenate(nboxes)
    classes = np.concatenate(nclasses)
    scores = np.concatenate(nscores)
    return boxes, classes, scores
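# post_process returns (boxes, classes, scores): boxes are xyxy in model-input
# pixel coordinates (the 640x640 letterboxed space), or (None, None, None)
# when nothing survives thresholding and NMS.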
def draw_detections(img, left, top, right, bottom, score, class_id):
    """
    Draw a bounding box and label on the input image for one detected object.

    Args:
        img: The input image to draw detections on.
        left, top, right, bottom: Detected bounding box in image coordinates.
        score: Corresponding detection score.
        class_id: Class ID for the detected object.

    Returns:
        None
    """
    # Retrieve the color for the class ID
    color = color_palette[class_id]
    # Draw the bounding box on the image
    cv2.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), color, 2)
    # Create the label text with class name and score
    label = f"{CLASSES[class_id]}: {score:.2f}"
    # Calculate the dimensions of the label text
    (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    # Calculate the position of the label text
    label_x = left
    label_y = top - 10 if top - 10 > label_height else top + 10
    # Draw a filled rectangle as the background for the label text
    cv2.rectangle(img, (label_x, label_y - label_height), (label_x + label_width, label_y + label_height), color,
                  cv2.FILLED)
    # Draw the label text on the image
    cv2.putText(img, label, (label_x, label_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
def draw(image, boxes, scores, classes):
    img_h, img_w = image.shape[:2]
    # Scaling factors from model-input space back to the original image
    # (approximate: the letterbox padding is not undone here)
    x_factor = img_w / MODEL_SIZE[0]
    y_factor = img_h / MODEL_SIZE[1]

    for box, score, cl in zip(boxes, scores, classes):
        x1, y1, x2, y2 = [int(_b) for _b in box]
        left = int(x1 * x_factor)
        top = int(y1 * y_factor) - 10
        right = int(x2 * x_factor)
        bottom = int(y2 * y_factor) + 10
        print('class: {}, score: {}'.format(CLASSES[cl], score))
        print('box coordinate left,top,right,bottom: [{}, {}, {}, {}]'.format(left, top, right, bottom))
        draw_detections(image, left, top, right, bottom, score, cl)
        # cv2.rectangle(image, (left, top), (right, bottom), color, 2)
        # cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
        #             (left, top - 6),
        #             cv2.FONT_HERSHEY_SIMPLEX,
        #             0.6, (0, 0, 255), 2)
def convert_to_relative_coordinates(box, img_width, img_height):
    xmin, ymin, xmax, ymax = box
    cx = (xmin + xmax) / 2 / img_width
    cy = (ymin + ymax) / 2 / img_height
    width = (xmax - xmin) / img_width
    height = (ymax - ymin) / img_height
    return cx, cy, width, height

def save_predictions_to_txt(predictions, file_path, img_width, img_height):
    # One line per detection: "cls cx cy w h score" (relative coordinates)
    with open(file_path, 'w') as f:
        for pred in predictions:
            boxes, classes, scores = pred[0], pred[1], pred[2]
            for box, cls, score in zip(boxes, classes, scores):
                cx, cy, width, height = convert_to_relative_coordinates(box, img_width, img_height)
                f.write(f"{cls} {cx} {cy} {width} {height} {score}\n")
if __name__ == '__main__':
    # Create the RKNN object
    rknn = RKNN()

    # Load the RKNN model
    print('--> Load RKNN model')
    ret = rknn.load_rknn(RKNN_MODEL)
    if ret != 0:
        print('Load RKNN model failed')
        exit(ret)
    print('done')

    # Initialize the runtime environment
    print('--> Init runtime environment')
    # When running on RK356x/RK3588 with a Debian OS, target does not need to be specified.
    ret = rknn.init_runtime(target='rk3588', device_id='03d205b241a91ccc')
    # For simulation on a PC:
    # ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed!')
        exit(ret)
    print('done')

    # Process the dataset
    img_list = os.listdir(IMG_FOLDER)
    for img_name in img_list:
        predictions = []
        img_path = os.path.join(IMG_FOLDER, img_name)
        if not os.path.exists(img_path):
            print("{} is not found".format(img_name))
            continue
        img_src = cv2.imread(img_path)
        if img_src is None:
            print("Failed to read {}".format(img_path))
            continue

        # Due to rga init with (0,0,0), we use pad_color (0,0,0) instead of (114, 114, 114)
        img = letter_box(im=img_src.copy(), new_shape=(MODEL_SIZE[1], MODEL_SIZE[0]), pad_color=(0, 0, 0))
        # img = cv2.resize(img_src, (640, 512), interpolation=cv2.INTER_LINEAR)  # direct resize
        # NOTE: if the model was converted expecting RGB input, convert here:
        # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        input_tensor = np.expand_dims(img, axis=0)
        outputs = rknn.inference(inputs=[input_tensor])
        boxes, classes, scores = post_process(outputs)
        if boxes is not None:
            predictions.append([boxes, classes, scores])

        img_p = img_src.copy()
        if boxes is not None:
            draw(img_p, boxes, scores, classes)

        # Save the visualized result
        if not os.path.exists(RESULT_PATH):
            os.mkdir(RESULT_PATH)
        result_path = os.path.join(RESULT_PATH, img_name)
        cv2.imwrite(result_path, img_p)
        print('Detection result saved to {}'.format(result_path))

        # Save the predictions as a txt file. Write into a 'predict' folder so
        # the ground-truth 'labels' files are not overwritten.
        pred_txt_path = img_path.replace('images', 'predict').replace('.jpg', '.txt')
        os.makedirs(os.path.dirname(pred_txt_path), exist_ok=True)
        save_predictions_to_txt(predictions, pred_txt_path, img_width, img_height)

    rknn.release()
# mAP computation code on the PC side
import numpy as np
import os

def read_txt_lab(file_path):
    # Read a YOLO-format label file: "cls cx cy w h" per line (relative coordinates)
    with open(file_path, 'r') as f:
        lines = f.readlines()
    data = []
    for line in lines:
        parts = line.strip().split()
        cls = int(parts[0])
        cx = float(parts[1])
        cy = float(parts[2])
        width = float(parts[3])
        height = float(parts[4])
        data.append([cls, cx, cy, width, height])
    return data
def read_txt_pred(file_path):
    # Read a prediction file: "cls cx cy w h score" per line (relative coordinates)
    with open(file_path, 'r') as f:
        lines = f.readlines()
    data = []
    for line in lines:
        parts = line.strip().split()
        cls = int(parts[0])
        cx = float(parts[1])
        cy = float(parts[2])
        width = float(parts[3])
        height = float(parts[4])
        score = float(parts[5])
        data.append([cls, cx, cy, width, height, score])
    return data
def convert_to_absolute_coordinates(box, img_width, img_height):
    cls, cx, cy, width, height = box
    xmin = (cx - width / 2) * img_width
    ymin = (cy - height / 2) * img_height
    xmax = (cx + width / 2) * img_width
    ymax = (cy + height / 2) * img_height
    return [xmin, ymin, xmax, ymax, cls]

def convert_to_absolute_coordinates_pred(box, img_width, img_height):
    cls, cx, cy, width, height, _ = box
    xmin = (cx - width / 2) * img_width
    ymin = (cy - height / 2) * img_height
    xmax = (cx + width / 2) * img_width
    ymax = (cy + height / 2) * img_height
    return [xmin, ymin, xmax, ymax, cls]
def calculate_iou(box1, box2):
    x1, y1, x2, y2 = box1[:4]
    x1g, y1g, x2g, y2g = box2[:4]

    xi1 = max(x1, x1g)
    yi1 = max(y1, y1g)
    xi2 = min(x2, x2g)
    yi2 = min(y2, y2g)

    inter_area = max(0, xi2 - xi1) * max(0, yi2 - yi1)
    box1_area = (x2 - x1) * (y2 - y1)
    box2_area = (x2g - x1g) * (y2g - y1g)
    union_area = box1_area + box2_area - inter_area
    # Guard against degenerate boxes with zero union area
    return inter_area / union_area if union_area > 0 else 0.0
def calculate_tp_fp(pred_boxes, pred_classes, pred_scores, gt_boxes, gt_classes, iou_threshold=0.5):
    tp = np.zeros(pred_boxes.shape[0])
    fp = np.zeros(pred_boxes.shape[0])
    detected = []
    # Match predictions in descending score order so that high-confidence
    # detections claim ground-truth boxes first; tp/fp stay indexed by the
    # original prediction order expected by ap_per_class
    order = np.argsort(-pred_scores)
    for i in order:
        pred_box = pred_boxes[i]
        ious = []
        for j, gt_box in enumerate(gt_boxes):
            if pred_classes[i] == gt_classes[j] and j not in detected:
                iou = calculate_iou(pred_box, gt_box)
                ious.append((iou, j))
        if ious:
            iou, j = max(ious, key=lambda x: x[0])
            if iou >= iou_threshold:
                tp[i] = 1
                detected.append(j)
            else:
                fp[i] = 1
        else:
            fp[i] = 1
    return tp, fp
def ap_per_class(tp, conf, pred_cls, target_cls):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    """
    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes = np.unique(target_cls)

    ap, p, r = [], [], []
    # Compute AP per class
    for c in unique_classes:
        i = pred_cls == c
        n_gt = (target_cls == c).sum()  # Number of ground truth objects
        n_p = i.sum()  # Number of predicted objects

        if n_p == 0 and n_gt == 0:
            continue
        elif n_p == 0 or n_gt == 0:
            ap.append(0)
            p.append(0)
            r.append(0)
        else:
            # Accumulate FPs and TPs
            fpc = np.cumsum(1 - tp[i])
            tpc = np.cumsum(tp[i])

            # Recall
            recall_curve = tpc / (n_gt + 1e-16)
            r.append(recall_curve[-1])

            # Precision
            precision_curve = tpc / (tpc + fpc)
            p.append(precision_curve[-1])

            # AP from recall-precision curve
            ap.append(compute_ap(recall_curve, precision_curve))

    # Compute F1 score (harmonic mean of precision and recall)
    p, r, ap = np.array(p), np.array(r), np.array(ap)
    f1 = 2 * p * r / (p + r + 1e-16)

    return p, r, ap, f1, unique_classes.astype(int)
def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    """
    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [1.]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # Compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # Integrate area under curve
    i = np.where(mrec[1:] != mrec[:-1])[0]
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap
# Read the predictions and labels
predictions_file_path = './datasets/predict'

# Original image size (dataset-specific)
img_width = 1080
img_height = 1920

total_map = 0.0
img_num = 0
for root, dirs, files in os.walk(predictions_file_path):
    for file in files:
        print(file)
        file_path = os.path.join(root, file)
        labels_file_path = file_path.replace('predict', 'labels')
        predictions = read_txt_pred(file_path)
        labels = read_txt_lab(labels_file_path)

        # Convert to absolute coordinates
        pred_boxes = np.array([convert_to_absolute_coordinates_pred(pred, img_width, img_height) for pred in predictions])
        gt_boxes = np.array([convert_to_absolute_coordinates(gt, img_width, img_height) for gt in labels])
        pred_classes = np.array([pred[0] for pred in predictions])
        gt_classes = np.array([gt[0] for gt in labels])
        pred_scores = np.array([pred[5] for pred in predictions])  # confidence scores from the prediction file

        # Compute TP and FP
        tp, fp = calculate_tp_fp(pred_boxes, pred_classes, pred_scores, gt_boxes[:, :4], gt_classes)
        # Compute per-class AP for this image
        precision, recall, ap, f1, ap_class = ap_per_class(tp, pred_scores, pred_classes, gt_classes)
        # Per-image mAP@0.5
        mAP50 = np.mean(ap)
        total_map += mAP50
        img_num += 1
        print("mAP@0.5:", mAP50)
        print("total_map@0.5:", total_map)

# Final mAP@0.5: average the per-image values over all images
if img_num > 0:
    print("mAP@0.5 averaged over {} images:".format(img_num), total_map / img_num)