推荐一个关于caffe的博客,非常优秀:Caffe学习笔记。
各项内容都写的非常详细。
使用pycaffe定义net层
# 生成 caffe 网络
import caffe
def create_net(output_path='net/tt.prototxt'):
    """Define a small CNN with pycaffe's NetSpec and dump it to a prototxt file.

    Args:
        output_path: file the generated prototxt is written to. Default keeps
            the original hard-coded 'net/tt.prototxt'; the containing
            directory must already exist.

    Returns:
        The generated prototxt text (also written to ``output_path``).
    """
    net = caffe.NetSpec()  # container object that accumulates the layers

    # Data layer: reads (image, label) pairs from an LMDB database.
    net.data, net.label = caffe.layers.Data(
        source='data.lmdb',
        backend=caffe.params.Data.LMDB,
        batch_size=32,
        ntop=2,  # two top blobs: data and label
        # crop_size: random 40x40 crop of each input image;
        # mirror: random horizontal flip (train-time augmentation).
        transform_param=dict(crop_size=40, mirror=True),
    )

    # conv1: 20 filters of size 5x5.
    # NOTE(review): 'xavier' as a *bias* filler is unusual (a constant filler
    # is the common choice); kept as-is to preserve the original behavior.
    net.conv1 = caffe.layers.Convolution(
        net.data,
        num_output=20,
        kernel_size=5,
        weight_filler={"type": "xavier"},
        bias_filler={"type": "xavier"},
    )
    # in_place=True: ReLU overwrites its bottom blob (top name == bottom
    # name in the prototxt), saving memory.
    net.relu1 = caffe.layers.ReLU(net.conv1, in_place=True)
    net.pool1 = caffe.layers.Pooling(
        net.relu1,
        pool=caffe.params.Pooling.MAX,
        kernel_size=3,
        stride=2,
    )

    # conv2: 32 filters of size 3x3 with 1 pixel of padding.
    net.conv2 = caffe.layers.Convolution(
        net.pool1,
        pad=1,
        num_output=32,
        kernel_size=3,
        weight_filler={"type": "xavier"},
        bias_filler={"type": "xavier"},
    )
    net.relu2 = caffe.layers.ReLU(net.conv2, in_place=True)
    net.pool2 = caffe.layers.Pooling(
        net.relu2,
        pool=caffe.params.Pooling.MAX,
        kernel_size=3,
        stride=2,
    )

    # Fully connected head: fc3 -> ReLU -> dropout -> fc4 -> softmax loss.
    net.fc3 = caffe.layers.InnerProduct(
        net.pool2, num_output=1024, weight_filler=dict(type="xavier"))
    net.relu3 = caffe.layers.ReLU(net.fc3, in_place=True)
    net.drop = caffe.layers.Dropout(
        net.relu3, dropout_param=dict(dropout_ratio=0.5))
    net.fc4 = caffe.layers.InnerProduct(
        net.drop, num_output=10, weight_filler=dict(type='xavier'))
    net.loss = caffe.layers.SoftmaxWithLoss(net.fc4, net.label)

    # Serialize the accumulated layers and write the prototxt.
    proto_text = str(net.to_proto())
    with open(output_path, 'w') as f:
        f.write(proto_text)
    return proto_text
if __name__ == "__main__":
    # Emit the prototxt when this file is executed directly.
    create_net()
# Reference output: the prototxt text that create_net() generates,
# kept verbatim (as a module-level string literal) from the original post.
'''
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
transform_param {
mirror: true
crop_size: 40
}
data_param {
source: "data.lmdb"
batch_size: 32
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
convolution_param {
num_output: 20
kernel_size: 5
weight_filler {
type: "xavier"
}
bias_filler {
type: "xavier"
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "xavier"
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "fc3"
type: "InnerProduct"
bottom: "pool2"
top: "fc3"
inner_product_param {
num_output: 1024
weight_filler {
type: "xavier"
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "fc3"
top: "fc3"
}
layer {
name: "drop"
type: "Dropout"
bottom: "fc3"
top: "drop"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc4"
type: "InnerProduct"
bottom: "drop"
top: "fc4"
inner_product_param {
num_output: 10
weight_filler {
type: "xavier"
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc4"
bottom: "label"
top: "loss"
}
'''
测试网络代码
# Run a trained LeNet in TEST mode on a dummy all-ones input and print the
# class scores from the "prob" blob.
import sys

# NOTE(review): appending the filesystem root to sys.path looks wrong —
# presumably the pycaffe directory was meant; confirm before removing.
sys.path.append('/')
import caffe
import numpy as np

# Deployment (inference) network definition.
deploy = "/data/cnn_net/lenet/lenet_depoly.prototxt"
# Trained weights snapshot.
model = "/data/cnn_model/mnist/lenet/lenet_iter_1000.caffemodel"
# Build the net with the weights loaded, in TEST phase.
net = caffe.Net(deploy, model, caffe.TEST)
# Feed a dummy image. Caffe blobs hold single-precision floats, so use
# float32 explicitly (the original np.uint8 array was silently cast on
# assignment — same values, misleading dtype).
net.blobs["data"].data[...] = np.ones((1, 28, 28), dtype=np.float32)
# Inference only needs a forward pass (no backward/update).
net.forward()
# Class scores for the first (and only) image in the batch.
prob = net.blobs["prob"].data[0]
print(prob)