1、下载文件包:https://github.com/qqwweee/keras-yolo3
2、为此模型建立anaconda虚拟环境。
conda create -n yolo python=3.6
安装相应的环境,conda install 安装即可
版本号对应:
- python 3.6.9
- cuda 9.2
- cudnn 7.3.1
- tensorflow-gpu 1.12.0
- keras 2.2.4
3、安装依赖
pip install Pillow
pip install matplotlib
conda install ffmpeg
pip install opencv-contrib-python
4、下载权重文件
https://pjreddie.com/media/files/yolov3.weights
也可以用命令行下载:
wget https://pjreddie.com/media/files/yolov3.weights
如果特别慢可以百度网盘下载:链接:https://pan.baidu.com/s/1vFlOhClLDcyvUfu0JatVtA
提取码:k7lo
5、转化权重为keras模型
将权重文件转换成.h5文件(代码包中有convert.py文件负责,生成的.h5文件在model_data中)
命令行输入:
python convert.py yolov3.cfg yolov3.weights model_data/yolo.h5
然后可以测试一下环境或者权重是否成功匹配好:
命令行输入:
python yolo_video.py --image
然后输入图片名字(单张)
6、准备数据集
①建立以下名字和对应关系的文件夹
#在keras-yolo3下创建
VOCdevkit
└── VOC2012
├── Annotations (存标注好的xml文件,标注可用labelImg)
├── ImageSets
│ ├── Layout
│ ├── Main (存放4个txt文件,装着图片名字,train.txt test.txt trainval.txt val.txt预先建好)
│ └── Segmentation
├── JPEGImages (放图片)
└── labels
没有备注的没用上。
②按比例分配测试图片和训练图片
写一个test.py用来分配1:9数据(Main中),执行完此程序Main中的四个txt文件中会有图片名字。
import os
import random

# Fraction of all annotations that go into the "trainval" pool, and the
# fraction of that pool written to test.txt (the remainder goes to val.txt).
# NOTE(review): with these values ~90% of images end up in train.txt,
# ~9% in test.txt and ~1% in val.txt -- confirm this matches the intended
# 1:9 split described in the tutorial text above.
trainval_percent = 0.1
train_percent = 0.9

xmlfilepath = '/home/sys507/sun/VOCdevkit/VOC2012/Annotations'
txtsavepath = '/home/sys507/sun/VOCdevkit/VOC2012/ImageSets/Main'

total_xml = os.listdir(xmlfilepath)
num = len(total_xml)
indices = range(num)  # renamed from `list` to avoid shadowing the builtin
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(indices, tv)
train = random.sample(trainval, tr)

# Open all four output files with `with` so they are closed even if an
# error occurs mid-loop; reuse txtsavepath instead of repeating the path.
with open(os.path.join(txtsavepath, 'trainval.txt'), 'w') as ftrainval, \
     open(os.path.join(txtsavepath, 'test.txt'), 'w') as ftest, \
     open(os.path.join(txtsavepath, 'train.txt'), 'w') as ftrain, \
     open(os.path.join(txtsavepath, 'val.txt'), 'w') as fval:
    for i in indices:
        name = total_xml[i][:-4] + '\n'  # strip the '.xml' extension
        if i in trainval:
            ftrainval.write(name)
            if i in train:
                ftest.write(name)
            else:
                fval.write(name)
        else:
            ftrain.write(name)
③修改voc_annotation.py中的类别并执行此文件
修改成自己的类和对应的生成文件名字
修改完执行,可用pycharm直接执行,也可命令行
python voc_annotation.py
执行后会在主目录生成3个txt文件
④生成适合自己数据的anchor
执行k-means.py文件,会在主目录生成yolo_anchors.txt文件。
⑤修改model_data中voc_classes.txt和coco_classes.txt中的类别,换成自己的类别。
7、开始训练
先在主目录建立一个名字为logs的文件夹,然后在里面再建立一个名为000的文件夹,用于存储最后的权重。
修改train.py中对应路径、初始训练权重、batch尺寸、迭代次数,修改完执行train.py文件,开始训练。
备注:也可以用以下代码替换掉原有的train.py代码,这个就是全部层数都训练,源文件是先训练后三层(冻结前面249层),然后再训练全部层。但是这个代码有个缺点就是不能保存中间权重,只有最终权重。
"""
Retrain the YOLO model for your own dataset.
"""
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def _main():
    """Entry point: load classes and anchors, build the YOLOv3 model, train it."""
    annotation_path = '2012_train.txt'
    log_dir = 'logs/000/'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'yolo_anchors.txt'

    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    # Network input resolution; must be a multiple of 32, given as (height, width).
    input_shape = (416, 416)

    model = create_model(input_shape, anchors, num_classes)
    train(model, annotation_path, input_shape, anchors, num_classes, log_dir=log_dir)
def train(model, annotation_path, input_shape, anchors, num_classes, log_dir='logs/'):
    """Compile the model and train all layers on the annotation file.

    Splits the annotation lines 95%/5% into train/validation, runs
    fit_generator for 200 epochs, and writes the final weights into *log_dir*.
    """
    # The yolo_loss Lambda layer already outputs the full loss value, so the
    # Keras-level loss function just passes that layer's output through.
    model.compile(optimizer='adam', loss={
        'yolo_loss': lambda y_true, y_pred: y_pred})

    # NOTE(review): these callbacks are constructed but never passed to
    # fit_generator below, so no intermediate checkpoints are written --
    # this matches the limitation described in the tutorial text above.
    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5",
                                 monitor='val_loss', save_weights_only=True, save_best_only=True, period=1)

    batch_size = 10
    val_split = 0.05  # fraction of annotation lines held out for validation

    with open(annotation_path) as f:
        annotation_lines = f.readlines()
    np.random.shuffle(annotation_lines)

    num_val = int(len(annotation_lines) * val_split)
    num_train = len(annotation_lines) - num_val
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))

    train_gen = data_generator_wrap(annotation_lines[:num_train], batch_size, input_shape, anchors, num_classes)
    val_gen = data_generator_wrap(annotation_lines[num_train:], batch_size, input_shape, anchors, num_classes)
    model.fit_generator(train_gen,
                        steps_per_epoch=max(1, num_train // batch_size),
                        validation_data=val_gen,
                        validation_steps=max(1, num_val // batch_size),
                        epochs=200,
                        initial_epoch=0)

    model.save_weights(log_dir + 'trained_weights.h5')
def get_classes(classes_path):
    """Read one class name per line from *classes_path* and return them as a list.

    Leading/trailing whitespace (including the newline) is stripped from each name.
    """
    with open(classes_path) as handle:
        return [line.strip() for line in handle]
def get_anchors(anchors_path):
    """Parse the first line of *anchors_path* as comma-separated floats.

    Returns an (N, 2) numpy array of anchor box (width, height) pairs.
    """
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = [float(token) for token in first_line.split(',')]
    return np.array(values).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=False, freeze_body=False,
weights_path='trained_weights.h5'):
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h // {
0: 32, 1: 16, 2: 8}[l], w // {
0: 32, 1: 16, 2: 8}[l], \
num_anchors // 3, num_classes + 5)) for l in range(3)]
model_body = yolo_body(image_input, num_anchors // 3, num_classes)
print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body:
# Do not freeze 3 output layers.
num = len(model_body.layers) - 7
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss'