The YOLOv3 Training Process, Traced Through the darknet Source Code

On the official YOLO website, the following command is given for training the model on a dataset:
./darknet detector train cfg/voc.data cfg/yolov3-voc.cfg darknet53.conv.74
But what the training code actually does is something many people never figure out, in particular where the loss function is computed during the network's forward pass. It took a long time to trace, but here is the full path through the source.
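run_detector() (step 2 below) also parses a -gpus flag, so on a multi-GPU machine the same command can be extended as follows (the GPU indices here are only an example):
./darknet detector train cfg/voc.data cfg/yolov3-voc.cfg darknet53.conv.74 -gpus 0,1,2,3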

1. Start from the main function: in examples/darknet.c, main() dispatches to run_detector() from its "detector" branch, as below.
int main(int argc, char **argv)
{
    //test_resize("data/bad.jpg");
    //test_box();
    //test_convolutional_layer();
    if(argc < 2){
        fprintf(stderr, "usage: %s <function>\n", argv[0]);
        return 0;
    }
    gpu_index = find_int_arg(argc, argv, "-i", 0);
    if(find_arg(argc, argv, "-nogpu")) {
        gpu_index = -1;  // -nogpu was given: explicitly disable the GPU
    }

#ifndef GPU
    gpu_index = -1; // built without GPU support, so never use one
#else
    if(gpu_index >= 0){
        cuda_set_device(gpu_index);  // bind to the GPU index passed with -i
    }
#endif

    if (0 == strcmp(argv[1], "average")){
        average(argc, argv);
    } else if (0 == strcmp(argv[1], "yolo")){
        run_yolo(argc, argv);
    } else if (0 == strcmp(argv[1], "super")){
        run_super(argc, argv);
    } else if (0 == strcmp(argv[1], "lsd")){
        run_lsd(argc, argv);
    } else if (0 == strcmp(argv[1], "detector")){ // the first argument is "detector"
        run_detector(argc, argv);
    } else if (0 == strcmp(argv[1], "detect")){
        float thresh = find_float_arg(argc, argv, "-thresh", .5);
        char *filename = (argc > 4) ? argv[4]: 0;
        char *outfile = find_char_arg(argc, argv, "-out", 0);
        int fullscreen = find_arg(argc, argv, "-fullscreen");
        test_detector("cfg/coco.data", argv[2], argv[3], filename, thresh, .5, outfile, fullscreen);
    } else if (0 == strcmp(argv[1], "cifar")){
        run_cifar(argc, argv);
    } else if (0 == strcmp(argv[1], "go")){
        run_go(argc, argv);
    } else if (0 == strcmp(argv[1], "rnn")){
        run_char_rnn(argc, argv);
    } else if (0 == strcmp(argv[1], "coco")){
        run_coco(argc, argv);
    } else if (0 == strcmp(argv[1], "classify")){
        predict_classifier("cfg/imagenet1k.data", argv[2], argv[3], argv[4], 5);
    } else if (0 == strcmp(argv[1], "classifier")){
        run_classifier(argc, argv);
    } else if (0 == strcmp(argv[1], "regressor")){
        run_regressor(argc, argv);
    } else if (0 == strcmp(argv[1], "segmenter")){
        run_segmenter(argc, argv);
    } else if (0 == strcmp(argv[1], "art")){
        run_art(argc, argv);
    } else if (0 == strcmp(argv[1], "tag")){
        run_tag(argc, argv);
    } else if (0 == strcmp(argv[1], "3d")){
        composite_3d(argv[2], argv[3], argv[4], (argc > 5) ? atof(argv[5]) : 0);
    } else if (0 == strcmp(argv[1], "test")){
        test_resize(argv[2]);
    } else if (0 == strcmp(argv[1], "captcha")){
        run_captcha(argc, argv);
    } else if (0 == strcmp(argv[1], "nightmare")){
        run_nightmare(argc, argv);
    } else if (0 == strcmp(argv[1], "rgbgr")){
        rgbgr_net(argv[2], argv[3], argv[4]);
    } else if (0 == strcmp(argv[1], "reset")){
        reset_normalize_net(argv[2], argv[3], argv[4]);
    } else if (0 == strcmp(argv[1], "denormalize")){
        denormalize_net(argv[2], argv[3], argv[4]);
    } else if (0 == strcmp(argv[1], "statistics")){
        statistics_net(argv[2], argv[3]);
    } else if (0 == strcmp(argv[1], "normalize")){
        normalize_net(argv[2], argv[3], argv[4]);
    } else if (0 == strcmp(argv[1], "rescale")){
        rescale_net(argv[2], argv[3], argv[4]);
    } else if (0 == strcmp(argv[1], "ops")){
        operations(argv[2]);
    } else if (0 == strcmp(argv[1], "speed")){
        speed(argv[2], (argc > 3 && argv[3]) ? atoi(argv[3]) : 0);
    } else if (0 == strcmp(argv[1], "oneoff")){
        oneoff(argv[2], argv[3], argv[4]);
    } else if (0 == strcmp(argv[1], "oneoff2")){
        oneoff2(argv[2], argv[3], argv[4], atoi(argv[5]));
    } else if (0 == strcmp(argv[1], "print")){
        print_weights(argv[2], argv[3], atoi(argv[4]));
    } else if (0 == strcmp(argv[1], "partial")){
        partial(argv[2], argv[3], argv[4], atoi(argv[5]));
    } else if (0 == strcmp(argv[1], "average")){
        average(argc, argv);
    } else if (0 == strcmp(argv[1], "visualize")){
        visualize(argv[2], (argc > 3) ? argv[3] : 0);
    } else if (0 == strcmp(argv[1], "mkimg")){
        mkimg(argv[2], argv[3], atoi(argv[4]), atoi(argv[5]), atoi(argv[6]), argv[7]);
    } else if (0 == strcmp(argv[1], "imtest")){
        test_resize(argv[2]);
    } else {
        fprintf(stderr, "Not an option: %s\n", argv[1]);
    }
    return 0;
}
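With the training command from the top of this article, the arguments consumed by main() and run_detector() map as follows:

// ./darknet detector train cfg/voc.data cfg/yolov3-voc.cfg darknet53.conv.74
// argv[0] = "./darknet"
// argv[1] = "detector"            -> main() dispatches to run_detector()
// argv[2] = "train"               -> run_detector() calls train_detector()
// argv[3] = "cfg/voc.data"        -> datacfg
// argv[4] = "cfg/yolov3-voc.cfg"  -> cfg
// argv[5] = "darknet53.conv.74"   -> weights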
2. In examples/detector.c, find the definition of run_detector(); its "train" branch is the entry to the training function train_detector(), as below.
void run_detector(int argc, char **argv)
{
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    float thresh = find_float_arg(argc, argv, "-thresh", .5); // detection score threshold, default 0.5
    float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
    int cam_index = find_int_arg(argc, argv, "-c", 0); // camera index (demo mode), default 0
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    if(argc < 4){
        fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    char *outfile = find_char_arg(argc, argv, "-out", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    if(gpu_list){
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus; // count GPUs: one per comma, plus one
        }
        gpus = calloc(ngpus, sizeof(int)); // allocate one slot per GPU index
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(gpu_list);
            gpu_list = strchr(gpu_list, ',')+1;
        }
    } else {
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }
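    /* Example: "-gpus 0,1,3" yields ngpus = 3 (one per comma, plus one) and
       gpus = {0, 1, 3}; without -gpus, the single global gpu_index from main() is used. */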

    int clear = find_arg(argc, argv, "-clear");
    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);
    //int class = find_int_arg(argc, argv, "-class", 0);

    char *datacfg = argv[3];   // data config, e.g. cfg/voc.data
    char *cfg = argv[4];    // network config, e.g. cfg/yolov3-voc.cfg
    char *weights = (argc > 5) ? argv[5] : 0;  // weight file: the 6th argument, if more than 5 are given
    char *filename = (argc > 6) ? argv[6]: 0; // test image file, if given
    if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear); // the 3rd argument is "train": start training
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
    else if(0==strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 20);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
    }
    //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
    //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
}
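A side note on usage: since train_detector() below periodically saves a .backup weight file into the backup directory, an interrupted run can be resumed by passing that file as the weights argument, e.g. (assuming the default backup directory named in voc.data):
./darknet detector train cfg/voc.data cfg/yolov3-voc.cfg backup/yolov3-voc.backup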
Inside train_detector(), the work of each iteration happens at loss = train_network(net, train), in the training loop below.
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{   // (data config, network config, weights, GPU array, number of GPUs, clear flag)
    list *options = read_data_cfg(datacfg);  // 1. read the data config file
    char *train_images = option_find_str(options, "train", "data/train.list");
    char *backup_directory = option_find_str(options, "backup", "/backup/");

    srand(time(0));

    /* srand() seeds the pseudo-random number generator used by rand(): rand()
       derives its sequence from the seed, so if the seed never changed, every
       run would produce the same pseudo-random sequence. */
    char *base = basecfg(cfgfile);  // 2. read the network config name
    // extracts the base name of the config file, for log output only (no functional
    // effect): 'cfg/yolov3-voc.cfg' yields yolov3-voc
    printf("%s\n", base);
    float avg_loss = -1;
    network **nets = calloc(ngpus, sizeof(network));

    srand(time(0));
    int seed = rand();
    int i;
    // each loop iteration builds an identical network; if initial weights were given, every network loads the same weights
    for(i = 0; i < ngpus; ++i){
        srand(seed);
#ifdef GPU
        cuda_set_device(gpus[i]); // with GPU support, bind each copy to its own GPU
#endif
        nets[i] = load_network(cfgfile, weightfile, clear); // 3. build the network from the config and load the weights
        // load_network() (src/network.c) wraps parse_network_cfg() and load_weights();
        // yolov2 called those two separately here, yolov3 bundles them into load_network(), which reads more clearly
        nets[i]->learning_rate *= ngpus;
    }
    srand(time(0));
    network *net = nets[0];
    // imgs is the number of images loaded into memory at once; if this takes too much
    // memory, reduce batch or subdivisions in the cfg
    int imgs = net->batch * net->subdivisions * ngpus;
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    data train, buffer;

    layer l = net->layers[net->n - 1];

    int classes = l.classes;
    float jitter = l.jitter;
    // imbalanced-dataset handling: jittering generates extra augmented data
    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);

    load_args args = get_base_args(net); // copy the network's base loading parameters
    args.coords = l.coords; // pass the last layer's parameters into the loading args
    args.paths = paths;
    args.n = imgs;  // number of images loaded into memory at once
    args.m = plist->size;  // total number of training images
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;
    args.type = DETECTION_DATA;
    //args.type = INSTANCE_DATA;
    args.threads = 64;
    // thread id for the asynchronous data loader
    pthread_t load_thread = load_data(args);  //src/data.c
    double time;
    int count = 0;
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net->max_batches){
        if(l.random && count++%10 == 0){
            printf("Resizing\n");
            int dim = (rand() % 10 + 10) * 32;
            if (get_current_batch(net)+200 > net->max_batches) dim = 608;
            //int dim = (rand() % 4 + 16) * 32;
            printf("%d\n", dim);
            args.w = dim;
            args.h = dim;

            pthread_join(load_thread, 0); // block until the loading thread finishes and its
            // resources are reclaimed; if the thread has already ended, this returns immediately
            train = buffer;
            free_data(train);
            load_thread = load_data(args);

            #pragma omp parallel for
            for(i = 0; i < ngpus; ++i){
                resize_network(nets[i], dim, dim);  // resize_network(network *net, int w, int h) in src/network.c
                // resizes every layer (convolutional, pooling, ...) to the new input size
            }
            net = nets[0];
        }
        time=what_time_is_it_now();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data(args);

        /*
           int k;
           for(k = 0; k < l.max_boxes; ++k){
           box b = float_to_box(train.y.vals[10] + 1 + k*5);
           if(!b.x) break;
           printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
           }
         */
        /*
           int zz;
           for(zz = 0; zz < train.X.cols; ++zz){
           image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
           int k;
           for(k = 0; k < l.max_boxes; ++k){
           box b = float_to_box(train.y.vals[zz] + k*5, 1);
           printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
           draw_bbox(im, b, 1, 1,0,0);
           }
           show_image(im, "truth11");
           cvWaitKey(0);
           save_image(im, "truth11");
           }
         */

        printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);

        time=what_time_is_it_now();
        float loss = 0;
#ifdef GPU
        
        // GPU training
        if(ngpus == 1){
            loss = train_network(net, train); // net is the network, train is the loaded data
        } else {
            loss = train_networks(nets, ngpus, train, 4);
        }
#else
        loss = train_network(net, train);
#endif
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;

        i = get_current_batch(net);
        printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
        if(i%100==0){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
        
        // during the first 1000 iterations, save the weights every 100; afterwards, every 10000
        if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }
#ifdef GPU
    if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}
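Two computations above are worth working through with numbers. darknet's parser (src/parser.c) divides the cfg batch by subdivisions, so net->batch is the number of images per forward/backward pass and imgs comes out to one full cfg batch per GPU; the random resize picks one of ten multiples of 32 between 320 and 608. A standalone sketch, assuming cfg values batch=64 and subdivisions=16 with one GPU:

#include <stdio.h>

int main(void)
{
    int cfg_batch = 64, subdivisions = 16, ngpus = 1;  /* assumed cfg values */
    int net_batch = cfg_batch / subdivisions;          /* 4: images per forward/backward pass */
    int imgs = net_batch * subdivisions * ngpus;       /* 64: images loaded per training iteration */
    printf("net->batch = %d, imgs = %d\n", net_batch, imgs);

    /* multi-scale training: dim = (rand() % 10 + 10) * 32 picks one of these ten sizes */
    int k;
    for(k = 0; k < 10; ++k) printf("%d ", (k + 10) * 32);  /* 320 352 384 ... 608 */
    printf("\n");
    return 0;
}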
3. In darknet/src/network.c, find the definition of train_network(network *net, data d); it calls train_network_datum(net), which runs the forward pass forward_network(), and the forward pass ends by calling the cost computation calc_network_cost(netp).
float train_network(network *net, data d)
{
    assert(d.X.rows % net->batch == 0);
    // rows is the number of samples loaded into memory at once (batch*subdivisions); cols is the dimension of one sample
    int batch = net->batch;
    int n = d.X.rows / batch; // = subdivisions: how many mini-batches one load is split into

    int i;
    float sum = 0;
    for(i = 0; i < n; ++i){
        get_next_batch(d, batch, i*batch, net->input, net->truth);
        // copies one mini-batch from d into the network's input and ground truth;
        // batch: samples per mini-batch; i*batch: offset of the i-th mini-batch
        float err = train_network_datum(net);
        sum += err;
    }
    return (float)sum/(n*batch);
}
float train_network_datum(network *net)
{
    *net->seen += net->batch; // seen counts the samples trained on so far
    net->train = 1;
    forward_network(net); // forward pass, which also computes the loss
    backward_network(net);
    float error = *net->cost; // the cost was filled in during the forward pass by calc_network_cost() in network.c
    if(((*net->seen)/net->batch)%net->subdivisions == 0) update_network(net);
    return error;
}
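The condition at the end of train_network_datum() means the weights are only updated once every subdivisions mini-batches, i.e. once per full cfg batch. A small sketch of that cadence, again assuming net->batch=4 and subdivisions=16:

#include <stdio.h>

int main(void)
{
    int batch = 4, subdivisions = 16;  /* assumed values, as above */
    int seen = 0, i;
    for(i = 0; i < 32; ++i){           /* 32 calls to train_network_datum() */
        seen += batch;                 /* mirrors *net->seen += net->batch */
        if((seen / batch) % subdivisions == 0)
            printf("update_network() after %d images\n", seen);  /* fires at 64 and 128 */
    }
    return 0;
}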
void forward_network(network *netp)
{
    // if built with GPU support, forward_network_gpu() (in this file) runs the
    // forward pass on the GPU instead
#ifdef GPU
    if(netp->gpu_index >= 0){
        forward_network_gpu(netp);   
        return;
    }
#endif
    network net = *netp;
    int i;
    // walk through all layers, first to last, running the forward pass layer by layer (the network has net.n layers)
    for(i = 0; i < net.n; ++i){
        // mark layer i as the network's current active layer
        net.index = i;
        // fetch the current layer
        layer l = net.layers[i];
        // if l.delta has been allocated, zero it with fill_cpu()
        if(l.delta){
            // fill_cpu() (src/blas.c) sets every element of l.delta to 0; the first argument is the element count, the second the fill value
            fill_cpu(l.outputs * l.batch, 0, l.delta, 1);
        }
        // forward inference of layer l
        l.forward(l, net);
        // after layer l finishes, point the network input at its output (the input of
        // the next layer); note this assigns to the pointer net.input itself, and since
        // net is a local copy of *netp, the change is lost once forward_network() returns,
        // when net.input reverts to its value before the call
        net.input = l.output;
        if(l.truth) {
            net.truth = l.output;
        }
    }
    // calc_network_cost() (this file) computes the cost of this forward pass
    calc_network_cost(netp);
}
void calc_network_cost(network *netp)
{
    network net = *netp;
    int i;
    float sum = 0;
    int count = 0;
    // in yolov3 only the [yolo] layers compute a cost, so this sums the three yolo layers' costs
    for(i = 0; i < net.n; ++i){
        if(net.layers[i].cost){
            sum += net.layers[i].cost[0];
            ++count;
        }
    }
    // the network's cost is the mean of the three yolo layers' costs
    *net.cost = sum/count;
}
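For example, if the three yolo layers of yolov3-voc.cfg report costs of 1.2, 0.8 and 1.0 in one forward pass (made-up values for illustration), the loss printed during training is *net.cost = (1.2 + 0.8 + 1.0) / 3 = 1.0.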

At this point we still do not know how the cost itself is computed; the loop above merely sums values that are already there, so we have to track down where cost[0] gets filled in. The answer is l.forward, which has already run for every layer and updates cost[0] along the way; for a cost layer, line 56 of cost_layer.c sets l.forward = forward_cost_layer.

4. In darknet/src/cost_layer.c, find forward_cost_layer(); it finally leads to the actual computations in smooth_l1_cpu(), l1_cpu() and l2_cpu().
void forward_cost_layer(cost_layer l, network net)
{
    if (!net.truth) return;
    if(l.cost_type == MASKED){
        int i;
        for(i = 0; i < l.batch*l.inputs; ++i){
            if(net.truth[i] == SECRET_NUM) net.input[i] = SECRET_NUM;
        }
    }
    if(l.cost_type == SMOOTH){
        smooth_l1_cpu(l.batch*l.inputs, net.input, net.truth, l.delta, l.output);
    }else if(l.cost_type == L1){
        l1_cpu(l.batch*l.inputs, net.input, net.truth, l.delta, l.output);
    } else {
        l2_cpu(l.batch*l.inputs, net.input, net.truth, l.delta, l.output);
    }
    l.cost[0] = sum_array(l.output, l.batch*l.inputs);
}
5. In darknet/src/blas.c, find the definitions of smooth_l1_cpu(), l1_cpu() and l2_cpu(); these show how the per-element loss of each forward pass is computed.
void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        float abs_val = fabs(diff);
        if(abs_val < 1) {
            error[i] = diff * diff;
            delta[i] = diff;
        }
        else {
            error[i] = 2*abs_val - 1;
            delta[i] = (diff < 0) ? 1 : -1;
        }
    }
}
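Note that error[i] has the usual smooth-L1 (Huber-style) shape: quadratic, diff*diff, while |diff| < 1, and linear, 2*|diff| - 1, beyond that, so the two pieces join continuously at |diff| = 1 with value 1.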

void l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        error[i] = fabs(diff);
        delta[i] = diff > 0 ? 1 : -1;
    }
}
void l2_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        error[i] = diff * diff;
        delta[i] = diff;
    }
}
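As a minimal harness for these kernels (an assumption: compiled and linked together with darknet's src/blas.c, e.g. gcc harness.c src/blas.c -Iinclude -lm, and the input values are made up), reproducing l.cost[0] = sum_array(l.output, l.batch*l.inputs) from forward_cost_layer():

#include <stdio.h>

/* prototype as defined in src/blas.c above */
void l2_cpu(int n, float *pred, float *truth, float *delta, float *error);

int main(void)
{
    float pred[4]  = {0.2f, 0.9f, 2.5f, -1.0f};
    float truth[4] = {0.0f, 1.0f, 0.5f,  0.0f};
    float delta[4], error[4];
    float cost = 0;
    int i;
    l2_cpu(4, pred, truth, delta, error);
    for(i = 0; i < 4; ++i) cost += error[i];  /* plays the role of sum_array(l.output, ...) */
    printf("L2 cost: %f\n", cost);            /* 0.04 + 0.01 + 4.00 + 1.00 = 5.05 */
    return 0;
}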

This completes the trace of the code that computes the training cost; for what the loss terms mean, see the YOLO papers.
