本文参考:
http://blog.csdn.net/u011762313/article/details/48213421
在实际项目中,为了自动化调整网络定义,需要脚本化调整网络定义参数。可以使用caffe的python接口生成caffe的网络定义文件。
代码如下:
# D:\git\DeepLearning\caffe\test\write_net.py # 使用python代码生成网络定义文件
import sys
# 引入caffe的python接口
caffe_root='D:/git/DeepLearning/caffe/build/x64/install/'
sys.path.insert(0, caffe_root+'python')
import caffe
# 引入caffe的layers,别名L
from caffe import layers as L
# 引入caffe的params,别名P
from caffe import params as P
# 定义生成网络定义文件的函数create_net
def create_net(lmdb, batch_size):
    """Build a LeNet-style network definition for MNIST.

    Args:
        lmdb: path to the LMDB database read by the data layer.
        batch_size: number of images per mini-batch.

    Returns:
        A caffe NetParameter protobuf message; ``str()`` of it is the
        prototxt text that can be written to a network definition file.
    """
    # NetSpec is a namespace: assigning attributes on it defines layers.
    n = caffe.NetSpec()
    # Data layer — ntop=2 because it produces two output blobs (data, label).
    # scale = 1/255 rescales 8-bit pixel values into [0, 1].
    n.data, n.label = L.Data(batch_size=batch_size,
                             backend=P.Data.LMDB, source=lmdb,
                             transform_param=dict(scale=1. / 255), ntop=2)
    # Convolution + max-pooling stage 1.
    n.conv1 = L.Convolution(n.data, kernel_size=5,
                            num_output=20, weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2,
                        stride=2, pool=P.Pooling.MAX)
    # Convolution + max-pooling stage 2.
    n.conv2 = L.Convolution(n.pool1, kernel_size=5,
                            num_output=50, weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2,
                        stride=2, pool=P.Pooling.MAX)
    # Fully connected layer with in-place ReLU activation.
    n.ip1 = L.InnerProduct(n.pool2,
                           num_output=500, weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    # Final classifier: 10 outputs, one per MNIST digit class.
    n.ip2 = L.InnerProduct(n.relu1, num_output=10,
                           weight_filler=dict(type='xavier'))
    # Softmax-with-loss layer combining prediction and ground-truth label.
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()
# Emit the network definition files next to this script (write_net.py):
# one training net and one test net, differing only in LMDB source and
# batch size. Paths and sizes are table-driven to avoid duplication.
_NETS = [
    ('write_net_train.prototxt', 'examples/mnist/mnist_train_lmdb', 64),
    ('write_net_test.prototxt', 'examples/mnist/mnist_test_lmdb', 32),
]
for out_name, lmdb_path, batch in _NETS:
    with open(out_name, 'w') as f:
        f.write(str(create_net(lmdb_path, batch)))
# 在caffe根目录(D:\git\DeepLearning\caffe)下,
# 运行下面语句,生成网络图
# python python\draw_net.py test\write_net_train.prototxt build\x64\tmp\write_net_train_test01.png --rankdir=BT
# python python\draw_net.py test\write_net_test.prototxt build\x64\tmp\write_net_test_test01.png --rankdir=BT