math_functions
src/caffe/util/math_functions.cpp
caffe_axpy
template <>
void caffe_axpy<float>(const int N, const float alpha, const float* X,
float* Y) { cblas_saxpy(N, alpha, X, 1, Y, 1); }
Function: Y = alpha*X + Y
N: the number of elements in X and Y
caffe_cpu_axpby
template <>
void caffe_cpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
cblas_saxpby(N, alpha, X, 1, beta, Y, 1);
}
Function: Y = alpha*X + beta*Y
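To make the two formulas concrete, here is a minimal plain-C++ sketch of what caffe_axpy and caffe_cpu_axpby compute (simple loop equivalents for illustration only, not the BLAS kernels Caffe actually dispatches to):
#include <cstdio>

// Loop equivalents of caffe_axpy (Y = alpha*X + Y) and
// caffe_cpu_axpby (Y = alpha*X + beta*Y); illustration only.
void axpy_ref(int N, float alpha, const float* X, float* Y) {
  for (int i = 0; i < N; ++i) Y[i] = alpha * X[i] + Y[i];
}

void axpby_ref(int N, float alpha, const float* X, float beta, float* Y) {
  for (int i = 0; i < N; ++i) Y[i] = alpha * X[i] + beta * Y[i];
}

int main() {
  float X[3] = {1, 2, 3};
  float Y[3] = {10, 20, 30};
  axpy_ref(3, 2.0f, X, Y);         // Y becomes {12, 24, 36}
  axpby_ref(3, 1.0f, X, 0.5f, Y);  // Y becomes {7, 14, 21}
  std::printf("%g %g %g\n", Y[0], Y[1], Y[2]);
  return 0;
}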
caffe_set: initialize memory
caffe_set(const int N, const Dtype alpha, Dtype* Y)
Sets each of the N elements pointed to by Y to the value alpha
caffe_cpu_dot: vector dot product (returns the sum over i of X[i]*Y[i])
caffe_cpu_gemm: matrix-matrix multiplication
template<>
void caffe_cpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cblas_dgemm(CblasRowMajor, TransA, TransB, M, N, K, alpha, A, lda, B,
ldb, beta, C, N);
}
Function: C = alpha*A*B + beta*C
A, B, C are the input matrices (each stored as a flat 1-D array)
CblasRowMajor: the data is row-major (the 2-D matrices are laid out row by row in 1-D arrays)
TransA, TransB: whether to transpose A and B (CblasTrans / CblasNoTrans)
M: number of rows of A (after any transpose) and of C
N: number of columns of B (after any transpose) and of C
K: number of columns of A = number of rows of B (after any transpose)
lda: leading dimension of A: K if A is not transposed, M if it is (i.e. the number of columns of A as actually stored)
ldb: leading dimension of B: N if B is not transposed, K if it is (i.e. the number of columns of B as actually stored)
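The parameter mapping is easiest to see on a tiny worked example. Below is a minimal, self-contained sketch (a reference triple loop, not the BLAS routine Caffe actually calls) that multiplies a 2x3 by a 3x2 matrix in row-major 1-D storage, following the M/N/K meanings above; the commented line shows what the equivalent caffe_cpu_gemm call would look like in a linked Caffe build:
#include <cstdio>

// Reference loop for C = alpha*A*B + beta*C, no transposes, row-major
// 1-D storage with lda = K, ldb = N, ldc = N. Illustration only.
void gemm_ref(int M, int N, int K, double alpha, const double* A,
              const double* B, double beta, double* C) {
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N; ++n) {
      double acc = 0;
      for (int k = 0; k < K; ++k) acc += A[m * K + k] * B[k * N + n];
      C[m * N + n] = alpha * acc + beta * C[m * N + n];
    }
  }
}

int main() {
  // A is 2x3, B is 3x2, C is 2x2, all stored as flat row-major arrays.
  double A[6] = {1, 2, 3, 4, 5, 6};
  double B[6] = {7, 8, 9, 10, 11, 12};
  double C[4] = {0, 0, 0, 0};
  // Equivalent Caffe call (assuming a Caffe build is linked):
  // caffe_cpu_gemm<double>(CblasNoTrans, CblasNoTrans, 2, 2, 3, 1.0, A, B, 0.0, C);
  gemm_ref(2, 2, 3, 1.0, A, B, 0.0, C);
  std::printf("%g %g\n%g %g\n", C[0], C[1], C[2], C[3]);  // 58 64 / 139 154
  return 0;
}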
caffe_cpu_gemv: matrix-vector multiplication
template <>
void caffe_cpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cblas_sgemv(CblasRowMajor, TransA, M, N, alpha, A, N, x, 1, beta, y, 1);
}
Function: y = alpha*A*x + beta*y
where x and y are vectors and A is a matrix
M: number of rows of A
N: number of columns of A
The 1 arguments passed to cblas_sgemv are the strides (increments) of x and y, so every element of x and y takes part in the operation
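A small worked example of the CblasNoTrans case, again written as a plain reference loop rather than the actual cblas_sgemv call; the commented line shows the equivalent caffe_cpu_gemv invocation, assuming a linked Caffe build:
#include <cstdio>

// Reference loop for y = alpha*A*x + beta*y with A stored row-major
// as an M x N array (the CblasNoTrans case); illustration only.
void gemv_ref(int M, int N, float alpha, const float* A, const float* x,
              float beta, float* y) {
  for (int m = 0; m < M; ++m) {
    float acc = 0;
    for (int n = 0; n < N; ++n) acc += A[m * N + n] * x[n];
    y[m] = alpha * acc + beta * y[m];
  }
}

int main() {
  float A[6] = {1, 2, 3, 4, 5, 6};  // 2x3 matrix, flat row-major
  float x[3] = {1, 1, 1};
  float y[2] = {0, 0};
  // Equivalent Caffe call: caffe_cpu_gemv<float>(CblasNoTrans, 2, 3, 1.f, A, x, 0.f, y);
  gemv_ref(2, 3, 1.f, A, x, 0.f, y);
  std::printf("%g %g\n", y[0], y[1]);  // 6 15
  return 0;
}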
caffe_scal: scale a vector in place (shrink or enlarge its values)
template <>
void caffe_scal<float>(const int N, const float alpha, float *X) {
cblas_sscal(N, alpha, X, 1);
}
Function: X = alpha*X
N: the number of elements in X
caffe_copy: copy memory
template <typename Dtype>
void caffe_copy(const int N, const Dtype* X, Dtype* Y) { // copy N elements from the memory pointed to by X into the memory pointed to by Y
if (X != Y) {
if (Caffe::mode() == Caffe::GPU) {
#ifndef CPU_ONLY
// NOLINT_NEXT_LINE(caffe/alt_fn)
CUDA_CHECK(cudaMemcpy(Y, X, sizeof(Dtype) * N, cudaMemcpyDefault));
#else
NO_GPU;
#endif
} else {
memcpy(Y, X, sizeof(Dtype) * N); // NOLINT(caffe/alt_fn)
}
}
}
im2col
src/caffe/util/im2col.cpp
im2col_cpu: rearranges the input into the layout used for convolution
im2col_cpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col)
This is groundwork for the convolution operation, not the convolution itself: it only rearranges the input image into a layout on which convolution can be carried out as a matrix multiplication. That sounds abstract, but the sketch below shows what it means; the full code with channel, padding, stride and dilation handling is in src/caffe/util/im2col.cpp.
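Below is a stripped-down sketch of the idea, assuming a single channel, no padding, no dilation and stride 1 (the real im2col_cpu handles all of these): each convolution window of the image becomes one column of the output buffer.
#include <cstdio>

// Simplified im2col: every output column holds the kernel_h x kernel_w
// patch that a convolution window would see at one output position.
void im2col_simple(const float* im, int height, int width,
                   int kernel_h, int kernel_w, float* col) {
  int out_h = height - kernel_h + 1;
  int out_w = width - kernel_w + 1;
  // col is (kernel_h * kernel_w) rows by (out_h * out_w) columns, row-major.
  for (int kh = 0; kh < kernel_h; ++kh)
    for (int kw = 0; kw < kernel_w; ++kw)
      for (int oh = 0; oh < out_h; ++oh)
        for (int ow = 0; ow < out_w; ++ow)
          col[((kh * kernel_w + kw) * out_h + oh) * out_w + ow] =
              im[(oh + kh) * width + (ow + kw)];
}

int main() {
  // 3x3 image, 2x2 kernel -> col buffer is 4 rows x 4 columns.
  float im[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  float col[16];
  im2col_simple(im, 3, 3, 2, 2, col);
  for (int r = 0; r < 4; ++r)
    std::printf("%g %g %g %g\n", col[r * 4], col[r * 4 + 1],
                col[r * 4 + 2], col[r * 4 + 3]);
  return 0;
}
With this layout, convolving with a 2x2 filter reduces to multiplying the flattened 1x4 filter by the 4x4 col buffer, which is why the convolution layer can be built on top of caffe_cpu_gemm.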
base_data_layer
src/caffe/layers/base_data_layer.cpp
BaseDataLayer
Constructor
template <typename Dtype>
BaseDataLayer<Dtype>::BaseDataLayer(const LayerParameter& param)
: Layer<Dtype>(param),
transform_param_(param.transform_param()) {
}
LayerSetUp
template <typename Dtype>
void BaseDataLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (top.size() == 1) { // if top.size() is 1, output only data; if it is 2, output both data and label
output_labels_ = false;
} else {
output_labels_ = true;
}
data_transformer_.reset(
new DataTransformer<Dtype>(transform_param_, this->phase_));
data_transformer_->InitRand();
// The subclasses should setup the size of bottom and top
DataLayerSetUp(bottom, top);
}
BasePrefetchingDataLayer
Constructor
template <typename Dtype>
BasePrefetchingDataLayer<Dtype>::BasePrefetchingDataLayer(
const LayerParameter& param)
: BaseDataLayer<Dtype>(param),
prefetch_(param.data_param().prefetch()),
prefetch_free_(), prefetch_full_(), prefetch_current_() {
for (int i = 0; i < prefetch_.size(); ++i) {
prefetch_[i].reset(new Batch<Dtype>());
prefetch_free_.push(prefetch_[i].get()); // push each Batch object onto the prefetch_free_ queue
}
}
LayerSetUp
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::LayerSetUp(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
BaseDataLayer<Dtype>::LayerSetUp(bottom, top);
// Before starting the prefetch thread, we make cpu_data and gpu_data
// calls so that the prefetch thread does not accidentally make simultaneous
// cudaMalloc calls when the main thread is running. In some GPUs this
// seems to cause failures if we do not so.
for (int i = 0; i < prefetch_.size(); ++i) {
prefetch_[i]->data_.mutable_cpu_data(); // touch the data blob so its CPU memory is allocated before the thread starts
if (this->output_labels_) {
prefetch_[i]->label_.mutable_cpu_data(); // likewise for the label blob
}
}
#ifndef CPU_ONLY
if (Caffe::mode() == Caffe::GPU) {
for (int i = 0; i < prefetch_.size(); ++i) {
prefetch_[i]->data_.mutable_gpu_data();
if (this->output_labels_) {
prefetch_[i]->label_.mutable_gpu_data();
}
}
}
#endif
DLOG(INFO) << "Initializing prefetch";
this->data_transformer_->InitRand();
StartInternalThread();
DLOG(INFO) << "Prefetch initialized.";
}
InternalThreadEntry
Entry point of the data-loading (prefetch) thread.
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::InternalThreadEntry() {
#ifndef CPU_ONLY
cudaStream_t stream;
if (Caffe::mode() == Caffe::GPU) {
CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
}
#endif
try {
while (!must_stop()) {
Batch<Dtype>* batch = prefetch_free_.pop(); // pop a free (empty) batch
load_batch(batch); // fill the batch with data
#ifndef CPU_ONLY
if (Caffe::mode() == Caffe::GPU) {
batch->data_.data().get()->async_gpu_push(stream);
if (this->output_labels_) {
batch->label_.data().get()->async_gpu_push(stream);
}
CUDA_CHECK(cudaStreamSynchronize(stream));
}
#endif
prefetch_full_.push(batch);
}
} catch (boost::thread_interrupted&) {
// Interrupted exception is expected on shutdown
}
#ifndef CPU_ONLY
if (Caffe::mode() == Caffe::GPU) {
CUDA_CHECK(cudaStreamDestroy(stream));
}
#endif
}
Forward_cpu
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_cpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
if (prefetch_current_) {
prefetch_free_.push(prefetch_current_);
}
prefetch_current_ = prefetch_full_.pop("Waiting for data");
// Reshape to loaded data; top[0] receives the data
top[0]->ReshapeLike(prefetch_current_->data_); // reshape the top data blob
top[0]->set_cpu_data(prefetch_current_->data_.mutable_cpu_data()); // point top[0] at the current batch's data (shared, not copied)
if (this->output_labels_) {
// Reshape to loaded labels; top[1] receives the label
top[1]->ReshapeLike(prefetch_current_->label_);
top[1]->set_cpu_data(prefetch_current_->label_.mutable_cpu_data());
}
}
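Taken together, InternalThreadEntry is the producer and Forward_cpu the consumer of a two-queue pipeline: empty Batch objects circulate through prefetch_free_, filled ones through prefetch_full_. Here is a minimal, self-contained sketch of that pattern using standard C++11 threads, with a toy blocking queue standing in for Caffe's BlockingQueue (simplified: the real layer keeps the current batch until the next Forward call before recycling it):
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// Tiny blocking queue: push never blocks, pop waits until an item is available.
template <typename T>
class SimpleBlockingQueue {
 public:
  void push(T v) {
    { std::lock_guard<std::mutex> lk(m_); q_.push(v); }
    cv_.notify_one();
  }
  T pop() {
    std::unique_lock<std::mutex> lk(m_);
    cv_.wait(lk, [this] { return !q_.empty(); });
    T v = q_.front(); q_.pop(); return v;
  }
 private:
  std::queue<T> q_;
  std::mutex m_;
  std::condition_variable cv_;
};

struct Batch { int data = 0; };

int main() {
  std::vector<Batch> batches(3);            // stand-ins for the prefetch_ batches
  SimpleBlockingQueue<Batch*> free_q, full_q;
  for (auto& b : batches) free_q.push(&b);  // like prefetch_free_ in the constructor

  std::thread producer([&] {                // like InternalThreadEntry()
    for (int i = 1; i <= 5; ++i) {
      Batch* b = free_q.pop();              // wait for an empty batch
      b->data = i;                          // like load_batch(batch)
      full_q.push(b);                       // hand it to the consumer
    }
  });

  for (int i = 0; i < 5; ++i) {             // like repeated Forward_cpu() calls
    Batch* b = full_q.pop();                // "Waiting for data"
    std::printf("forward sees batch %d\n", b->data);
    free_q.push(b);                         // recycle the batch
  }
  producer.join();
  return 0;
}
Running load_batch on the worker thread this way lets data reading and transformation overlap with the network's forward and backward computation.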