Caffe (5): Solver Analysis

Solver

src/caffe/solver.cpp

Init
template <typename Dtype>
void Solver<Dtype>::Init(const SolverParameter& param) {
  LOG_IF(INFO, Caffe::root_solver()) << "Initializing solver from parameters: "
      << std::endl << param.DebugString();
  param_ = param;
  CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative.";
  CheckSnapshotWritePermissions();
  if (param_.random_seed() >= 0) {
    Caffe::set_random_seed(param_.random_seed() + Caffe::solver_rank());
  }
  // Scaffolding code
  InitTrainNet();
  InitTestNets();
  if (Caffe::root_solver()) {
    LOG(INFO) << "Solver scaffolding done.";
  }
  iter_ = 0;
  current_step_ = 0;
}
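
For context, this is roughly how a solver gets constructed so that Init() ends up being called; a minimal sketch along the lines of tools/caffe.cpp (the file name "solver.prototxt" is just a placeholder):

#include <boost/shared_ptr.hpp>
#include "caffe/caffe.hpp"

int main() {
  caffe::SolverParameter solver_param;
  // Parse the solver definition; dies with a readable error on failure.
  caffe::ReadSolverParamsFromTextFileOrDie("solver.prototxt", &solver_param);
  // The registry instantiates the right subclass (SGD, Adam, ...) from
  // solver_param.type(); the constructor calls Init(solver_param).
  boost::shared_ptr<caffe::Solver<float> > solver(
      caffe::SolverRegistry<float>::CreateSolver(solver_param));
  solver->Solve();  // runs Step() until max_iter is reached
  return 0;
}
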
InitTrainNet
template <typename Dtype>
void Solver<Dtype>::InitTrainNet() {
  const int num_train_nets = param_.has_net() + param_.has_net_param() +
      param_.has_train_net() + param_.has_train_net_param();
  const string& field_names = "net, net_param, train_net, train_net_param";
  CHECK_GE(num_train_nets, 1) << "SolverParameter must specify a train net "
      << "using one of these fields: " << field_names;
  CHECK_LE(num_train_nets, 1) << "SolverParameter must not contain more than "
      << "one of these fields specifying a train_net: " << field_names;
  NetParameter net_param;
  if (param_.has_train_net_param()) {
    LOG_IF(INFO, Caffe::root_solver())
        << "Creating training net specified in train_net_param.";
    net_param.CopyFrom(param_.train_net_param());
  } else if (param_.has_train_net()) {
    LOG_IF(INFO, Caffe::root_solver())
        << "Creating training net from train_net file: " << param_.train_net();
    ReadNetParamsFromTextFileOrDie(param_.train_net(), &net_param);
  }
  if (param_.has_net_param()) {
    LOG_IF(INFO, Caffe::root_solver())
        << "Creating training net specified in net_param.";
    net_param.CopyFrom(param_.net_param());
  }
  if (param_.has_net()) {
    LOG_IF(INFO, Caffe::root_solver())
        << "Creating training net from net file: " << param_.net();
    ReadNetParamsFromTextFileOrDie(param_.net(), &net_param);
  }
  // Set the correct NetState.  We start with the solver defaults (lowest
  // precedence); then, merge in any NetState specified by the net_param itself;
  // finally, merge in any NetState specified by the train_state (highest
  // precedence).
  NetState net_state;
  net_state.set_phase(TRAIN);
  net_state.MergeFrom(net_param.state());
  net_state.MergeFrom(param_.train_state());
  net_param.mutable_state()->CopyFrom(net_state);
  net_.reset(new Net<Dtype>(net_param));
}
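
The three MergeFrom/CopyFrom calls implement the precedence order described in the comment: for protobuf messages, MergeFrom overwrites scalar fields that are set in the argument, so whatever is merged last wins. A small illustrative sketch (the level values are made up):

caffe::NetState state;
state.set_phase(caffe::TRAIN);   // solver default: lowest precedence
caffe::NetState from_net;
from_net.set_level(1);
state.MergeFrom(from_net);       // net_param.state(): level becomes 1
caffe::NetState from_solver;
from_solver.set_level(2);
state.MergeFrom(from_solver);    // param_.train_state(): level becomes 2, wins
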
InitTestNets
Largely the same as InitTrainNet, except that there can be several test nets, so every one of them has to be initialized; a condensed sketch of the core setup loop is shown below.
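
A condensed sketch of what InitTestNets boils down to (the actual function also validates the test_iter/test_state sizes and resolves which proto field each test net comes from; net_params here stands for the per-net NetParameters it gathers):

test_nets_.resize(num_test_net_instances);
for (int i = 0; i < num_test_net_instances; ++i) {
  // Force phase TEST, then merge states with the same precedence order as
  // in InitTrainNet: the net's own state first, then param_.test_state(i).
  NetState net_state;
  net_state.set_phase(TEST);
  net_state.MergeFrom(net_params[i].state());
  if (param_.test_state_size()) {
    net_state.MergeFrom(param_.test_state(i));
  }
  net_params[i].mutable_state()->CopyFrom(net_state);
  LOG(INFO) << "Creating test net #" << i;
  test_nets_[i].reset(new Net<Dtype>(net_params[i]));
  test_nets_[i]->set_debug_info(param_.debug_info());
}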


Test
template <typename Dtype>
void Solver<Dtype>::Test(const int test_net_id) {
  CHECK(Caffe::root_solver());
  LOG(INFO) << "Iteration " << iter_
            << ", Testing net (#" << test_net_id << ")";
  CHECK_NOTNULL(test_nets_[test_net_id].get())->
      ShareTrainedLayersWith(net_.get());
  vector<Dtype> test_score;
  vector<int> test_score_output_id;
  const shared_ptr<Net<Dtype> >& test_net = test_nets_[test_net_id];  // the test net at index test_net_id
  Dtype loss = 0;
  for (int i = 0; i < param_.test_iter(test_net_id); ++i) {  // one forward pass of the test net per iteration
    SolverAction::Enum request = GetRequestedAction();
    // Check to see if stoppage of testing/training has been requested.
    while (request != SolverAction::NONE) {
        if (SolverAction::SNAPSHOT == request) {
          Snapshot();
        } else if (SolverAction::STOP == request) {
          requested_early_exit_ = true;
        }
        request = GetRequestedAction();
    }
    if (requested_early_exit_) {
      // break out of test loop.
      break;
    }

    Dtype iter_loss;
    const vector<Blob<Dtype>*>& result = test_net->Forward(&iter_loss);  // compute the net's outputs and loss
    if (param_.test_compute_loss()) {
      loss += iter_loss;  // accumulate the loss
    }
    if (i == 0) {
      for (int j = 0; j < result.size(); ++j) {
        const Dtype* result_vec = result[j]->cpu_data();
        for (int k = 0; k < result[j]->count(); ++k) {
          test_score.push_back(result_vec[k]);
          test_score_output_id.push_back(j);  // record each output blob's values and its blob index
        }
      }
    } else {
      int idx = 0;
      for (int j = 0; j < result.size(); ++j) {
        const Dtype* result_vec = result[j]->cpu_data();
        for (int k = 0; k < result[j]->count(); ++k) {
          test_score[idx++] += result_vec[k];  // on subsequent iterations, accumulate into test_score
        }
      }
    }
  }
  if (requested_early_exit_) {
    LOG(INFO) << "Test interrupted.";
    return;
  }
  if (param_.test_compute_loss()) {
    loss /= param_.test_iter(test_net_id);  // average the accumulated loss
    LOG(INFO) << "Test loss: " << loss;
  }
  for (int i = 0; i < test_score.size(); ++i) {
    const int output_blob_index =
        test_net->output_blob_indices()[test_score_output_id[i]];
    const string& output_name = test_net->blob_names()[output_blob_index];
    const Dtype loss_weight = test_net->blob_loss_weights()[output_blob_index];
    ostringstream loss_msg_stream;
    const Dtype mean_score = test_score[i] / param_.test_iter(test_net_id);  // mean score over the test iterations
    if (loss_weight) {
      loss_msg_stream << " (* " << loss_weight
                      << " = " << loss_weight * mean_score << " loss)";
    }
    LOG(INFO) << "    Test net output #" << i << ": " << output_name << " = "
              << mean_score << loss_msg_stream.str();
  }
}
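
Note that both the test loss and each mean_score are divided by param_.test_iter(test_net_id), so the reported numbers are averages over the test iterations; for them to cover the test set exactly once, test_iter times the test net's batch size should equal the test set size.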
UpdateSmoothedLoss

Maintains a moving average of the loss over the most recent average_loss iterations; losses_ acts as a circular buffer. A standalone toy demo follows the function.

template <typename Dtype>
void Solver<Dtype>::UpdateSmoothedLoss(Dtype loss, int start_iter,
    int average_loss) {
  if (losses_.size() < average_loss) {
    losses_.push_back(loss);
    int size = losses_.size();
    smoothed_loss_ = (smoothed_loss_ * (size - 1) + loss) / size;  // plain running mean while the buffer fills
  } else {
    int idx = (iter_ - start_iter) % average_loss;
    smoothed_loss_ += (loss - losses_[idx]) / average_loss;  // swap the oldest loss out of the window
    losses_[idx] = loss;
  }
}
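
A standalone toy version of the same circular-buffer logic (a hypothetical demo, not Caffe code) makes the behavior concrete: with average_loss = 3 the reported value is always the mean of the last three raw losses.

#include <cstdio>
#include <vector>

int main() {
  const int average_loss = 3;
  std::vector<float> losses;
  float smoothed = 0.f;
  const float raw[] = {4.f, 2.f, 3.f, 9.f, 1.f};
  for (int iter = 0; iter < 5; ++iter) {
    const float loss = raw[iter];
    if (static_cast<int>(losses.size()) < average_loss) {
      // Buffer still filling: plain running mean.
      losses.push_back(loss);
      const int size = losses.size();
      smoothed = (smoothed * (size - 1) + loss) / size;
    } else {
      // Buffer full: swap the oldest entry out of the mean.
      const int idx = iter % average_loss;
      smoothed += (loss - losses[idx]) / average_loss;
      losses[idx] = loss;
    }
    std::printf("iter %d: loss=%.1f smoothed=%.3f\n", iter, loss, smoothed);
  }
  return 0;
}
// Prints smoothed = 4.000, 3.000, 3.000, 4.667, 4.333 -- the last two are
// exactly mean(2,3,9) and mean(3,9,1).
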
Step

The core training loop: each iteration zeroes the parameter diffs, runs the test nets when the test conditions are met, accumulates loss and gradients over iter_size forward/backward passes, and then applies the parameter update.

template <typename Dtype>
void Solver<Dtype>::Step(int iters) {
  const int start_iter = iter_;
  const int stop_iter = iter_ + iters;
  int average_loss = this->param_.average_loss();  // size of the loss-smoothing window
  losses_.clear();
  smoothed_loss_ = 0;  // the moving-average loss reported in the log

  iteration_timer_.Start();

  while (iter_ < stop_iter) {
    // zero-init the params
    net_->ClearParamDiffs();
    if (param_.test_interval() && iter_ % param_.test_interval() == 0
        && (iter_ > 0 || param_.test_initialization())) {
      // all the conditions for testing are met, so run the test nets
      if (Caffe::root_solver()) {
        TestAll();
      }
      if (requested_early_exit_) {
        // Break out of the while loop because stop was requested while testing.
        break;
      }
    }

    for (int i = 0; i < callbacks_.size(); ++i) {
      callbacks_[i]->on_start();
    }
    const bool display = param_.display() && iter_ % param_.display() == 0;
    net_->set_debug_info(display && param_.debug_info());
    // accumulate the loss and gradient
    Dtype loss = 0;
    for (int i = 0; i < param_.iter_size(); ++i) {
      loss += net_->ForwardBackward();
    }
    loss /= param_.iter_size();
    // average the loss across iterations for smoothed reporting
    UpdateSmoothedLoss(loss, start_iter, average_loss);
    if (display) {
      float lapse = iteration_timer_.Seconds();
      float per_s = (iter_ - iterations_last_) / (lapse ? lapse : 1);
      LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_
          << " (" << per_s << " iter/s, " << lapse << "s/"
          << param_.display() << " iters), loss = " << smoothed_loss_;
      iteration_timer_.Start();
      iterations_last_ = iter_;
      const vector<Blob<Dtype>*>& result = net_->output_blobs();
      int score_index = 0;
      for (int j = 0; j < result.size(); ++j) {
        const Dtype* result_vec = result[j]->cpu_data();
        const string& output_name =
            net_->blob_names()[net_->output_blob_indices()[j]];
        const Dtype loss_weight =
            net_->blob_loss_weights()[net_->output_blob_indices()[j]];
        for (int k = 0; k < result[j]->count(); ++k) {
          ostringstream loss_msg_stream;
          if (loss_weight) {
            loss_msg_stream << " (* " << loss_weight
                << " = " << loss_weight * result_vec[k] << " loss)";
          }
          LOG_IF(INFO, Caffe::root_solver()) << "    Train net output #"
              << score_index++ << ": " << output_name << " = "
              << result_vec[k] << loss_msg_stream.str();
        }
      }
    }
    for (int i = 0; i < callbacks_.size(); ++i) {
      callbacks_[i]->on_gradients_ready();
    }
    ApplyUpdate();

    // Increment the internal iter_ counter -- its value should always indicate
    // the number of times the weights have been updated.
    ++iter_;

    SolverAction::Enum request = GetRequestedAction();

    // Save a snapshot if needed.
    if ((param_.snapshot()
         && iter_ % param_.snapshot() == 0
         && Caffe::root_solver()) ||
        (request == SolverAction::SNAPSHOT)) {
      Snapshot();
    }
    if (SolverAction::STOP == request) {
      requested_early_exit_ = true;
      // Break out of training loop.
      break;
    }
  }
}
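
GetRequestedAction() is just a callback installed from outside the solver; in tools/caffe.cpp it is wired to OS signals, roughly like this (a sketch, with the STOP/SNAPSHOT mapping chosen for illustration):

#include "caffe/util/signal_handler.h"

// SIGINT (Ctrl-C) -> STOP: finish the current iteration, then leave Step().
// SIGHUP -> SNAPSHOT: save a snapshot and keep training.
caffe::SignalHandler signal_handler(caffe::SolverAction::STOP,
                                    caffe::SolverAction::SNAPSHOT);
solver->SetActionFunction(signal_handler.GetActionFunction());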
Solve

Once initialization is done, Solve is called; it in turn calls Step to carry out the training loop.

template <typename Dtype>
void Solver<Dtype>::Solve(const char* resume_file) {
  CHECK(Caffe::root_solver());
  LOG(INFO) << "Solving " << net_->name();
  LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy();

  // Initialize to false every time we start solving.
  requested_early_exit_ = false;

  if (resume_file) {
    LOG(INFO) << "Restoring previous solver status from " << resume_file;
    Restore(resume_file);
  }

  // For a network that is trained by the solver, no bottom or top vecs
  // should be given, and we will just provide dummy vecs.
  int start_iter = iter_;
  Step(param_.max_iter() - iter_);
  // If we haven't already, save a snapshot after optimization, unless
  // overridden by setting snapshot_after_train := false
  if (param_.snapshot_after_train()
      && (!param_.snapshot() || iter_ % param_.snapshot() != 0)) {
    Snapshot();
  }
  if (requested_early_exit_) {
    LOG(INFO) << "Optimization stopped early.";
    return;
  }
  // After optimization, run an additional forward pass (and optionally a
  // final TestAll()) to display the training/test loss or outputs.
  if (param_.display() && iter_ % param_.display() == 0) {
    int average_loss = this->param_.average_loss();
    Dtype loss;
    net_->Forward(&loss);

    UpdateSmoothedLoss(loss, start_iter, average_loss);

    LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss_;
  }
  if (param_.test_interval() && iter_ % param_.test_interval() == 0) {
    TestAll();
  }
  LOG(INFO) << "Optimization Done.";
}
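
Resuming works by passing a .solverstate file: Restore() reloads iter_, the update history, and the weights before Step() continues from where training left off. A hypothetical call (the file name is made up):

solver->Solve("snapshots/train_iter_10000.solverstate");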
SGDSolver
ClipGradients

Clips the gradients: if the L2 norm over all parameter diffs exceeds clip_gradients, every diff is scaled by clip_gradients / l2norm_diff. For example, with clip_gradients = 10 and an L2 norm of 40, each diff is multiplied by 0.25, bringing the norm back down to 10.

template <typename Dtype>
void SGDSolver<Dtype>::ClipGradients() {
  const Dtype clip_gradients = this->param_.clip_gradients();
  if (clip_gradients < 0) { return; }
  const vector<Blob<Dtype>*>& net_params = this->net_->learnable_params();
  Dtype sumsq_diff = 0;
  for (int i = 0; i < net_params.size(); ++i) {
    sumsq_diff += net_params[i]->sumsq_diff();
  }
  const Dtype l2norm_diff = std::sqrt(sumsq_diff);
  if (l2norm_diff > clip_gradients) {
    Dtype scale_factor = clip_gradients / l2norm_diff;
    LOG(INFO) << "Gradient clipping: scaling down gradients (L2 norm "
        << l2norm_diff << " > " << clip_gradients << ") "
        << "by scale factor " << scale_factor;
    for (int i = 0; i < net_params.size(); ++i) {
      net_params[i]->scale_diff(scale_factor);
    }
  }
}
Normalize

Normalization: after the diffs have been accumulated over iter_size forward/backward passes, they are scaled by 1 / iter_size. For example, with batch_size = 32 and iter_size = 4 the effective batch is 128, and the accumulated diff is scaled by 1/4 so the gradient is averaged rather than summed.

template <typename Dtype>
void SGDSolver<Dtype>::Normalize(int param_id) {
  if (this->param_.iter_size() == 1) { return; }
  // Scale gradient to counterbalance accumulation.
  const vector<Blob<Dtype>*>& net_params = this->net_->learnable_params();
  const Dtype accum_normalization = Dtype(1.) / this->param_.iter_size();
  switch (Caffe::mode()) {
  case Caffe::CPU: {
    caffe_scal(net_params[param_id]->count(), accum_normalization,
        net_params[param_id]->mutable_cpu_diff());
    break;
  }
  case Caffe::GPU: {
#ifndef CPU_ONLY
    caffe_gpu_scal(net_params[param_id]->count(), accum_normalization,
        net_params[param_id]->mutable_gpu_diff());
#else
    NO_GPU;
#endif
    break;
  }
  default:
    LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode();
  }
}
Regularize

Regularization, either L2 or L1; only the CPU path is shown here. L2 adds local_decay * w to the diff (the derivative of the (λ/2)·‖w‖² term), while L1 adds local_decay * sign(w) (the subgradient of the λ·‖w‖₁ term).

template <typename Dtype>
void SGDSolver<Dtype>::Regularize(int param_id) {
  const vector<Blob<Dtype>*>& net_params = this->net_->learnable_params();
  const vector<float>& net_params_weight_decay =
      this->net_->params_weight_decay();
  Dtype weight_decay = this->param_.weight_decay();
  string regularization_type = this->param_.regularization_type();
  Dtype local_decay = weight_decay * net_params_weight_decay[param_id];
  switch (Caffe::mode()) {
  case Caffe::CPU: {
    if (local_decay) {
      if (regularization_type == "L2") {
        // add weight decay: diff += local_decay * w (derivative of the L2 term)
        caffe_axpy(net_params[param_id]->count(),
            local_decay,
            net_params[param_id]->cpu_data(),
            net_params[param_id]->mutable_cpu_diff());
      } else if (regularization_type == "L1") {
        // temp_ = sign(w), then diff += local_decay * sign(w)
        // (subgradient of the L1 term)
        caffe_cpu_sign(net_params[param_id]->count(),
            net_params[param_id]->cpu_data(),
            temp_[param_id]->mutable_cpu_data());
        caffe_axpy(net_params[param_id]->count(),
            local_decay,
            temp_[param_id]->cpu_data(),
            net_params[param_id]->mutable_cpu_diff());
      } else {
        LOG(FATAL) << "Unknown regularization type: " << regularization_type;
      }
    }
    break;
  }
  // ... the GPU case is omitted here; it mirrors the CPU path using the
  // caffe_gpu_* equivalents ...
  }
}
ComputeUpdateValue

Computes the momentum update: history_ = local_rate * diff + momentum * history_, and the result is copied back into the parameter diff; Net::Update() later subtracts the diff from the weights.

template <typename Dtype>
void SGDSolver<Dtype>::ComputeUpdateValue(int param_id, Dtype rate) {
  const vector<Blob<Dtype>*>& net_params = this->net_->learnable_params();
  const vector<float>& net_params_lr = this->net_->params_lr();
  Dtype momentum = this->param_.momentum();
  Dtype local_rate = rate * net_params_lr[param_id];
  // Compute the update to history, then copy it to the parameter diff.
  switch (Caffe::mode()) {
  case Caffe::CPU: {
    caffe_cpu_axpby(net_params[param_id]->count(), local_rate,
        net_params[param_id]->cpu_diff(), momentum,
        history_[param_id]->mutable_cpu_data());  // history_ = local_rate * diff + momentum * history_
    caffe_copy(net_params[param_id]->count(),
        history_[param_id]->cpu_data(),
        net_params[param_id]->mutable_cpu_diff());
    break;
  }
  case Caffe::GPU: {
#ifndef CPU_ONLY
    sgd_update_gpu(net_params[param_id]->count(),
        net_params[param_id]->mutable_gpu_diff(),
        history_[param_id]->mutable_gpu_data(),
        momentum, local_rate);
#else
    NO_GPU;
#endif
    break;
  }
  default:
    LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode();
  }
}
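
A toy numeric check of the momentum recurrence (a hypothetical demo, not Caffe code): with a constant gradient, the velocity v = momentum * v + lr * g approaches lr * g / (1 - momentum), here 0.05.

#include <cstdio>

int main() {
  const float momentum = 0.9f, lr = 0.01f;
  float v = 0.f, w = 1.f;
  for (int i = 0; i < 3; ++i) {
    const float g = 0.5f;       // constant gradient
    v = momentum * v + lr * g;  // what caffe_cpu_axpby writes into history_
    w -= v;                     // what Net::Update() does with the copied diff
    std::printf("iter %d: v=%.5f w=%.5f\n", i, v, w);
  }
  return 0;
}
// v = 0.00500, 0.00950, 0.01355 -- climbing toward lr*g/(1-momentum) = 0.05.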