Generating prototxt Files with Caffe's Python Interface

Python version: Python 2.7
Dataset: MNIST
Original post: http://blog.csdn.net/c406495762/article/details/70306550

How to compile Caffe's Python interface is not covered here.
The code below generates, in one pass, the train.prototxt and test.prototxt needed to train a LeNet network, along with the solver.prototxt.

Code:

# -*- coding: UTF-8 -*-
import caffe                                                     # import the caffe package

def create_net(lmdb, mean_file, batch_size, include_acc=False):
    # network specification
    net = caffe.NetSpec()
    # Data layer
    net.data, net.label = caffe.layers.Data(source=lmdb, backend=caffe.params.Data.LMDB, batch_size=batch_size, ntop=2,
                                            transform_param=dict(mean_file=mean_file, scale=0.00390625))  # scale = 1/256
    # vision layers: convolution + pooling
    net.conv1 = caffe.layers.Convolution(net.data, num_output=20, kernel_size=5, weight_filler={"type": "xavier"}, bias_filler={"type": "constant"})
    net.pool1 = caffe.layers.Pooling(net.conv1, pool=caffe.params.Pooling.MAX, kernel_size=2, stride=2)
    net.conv2 = caffe.layers.Convolution(net.pool1, kernel_size=5, stride=1, num_output=50, pad=0, weight_filler=dict(type='xavier'), bias_filler={"type": "constant"})
    net.pool2 = caffe.layers.Pooling(net.conv2, pool=caffe.params.Pooling.MAX, kernel_size=2, stride=2)
    # fully connected layers
    net.fc1 = caffe.layers.InnerProduct(net.pool2, num_output=500, weight_filler=dict(type='xavier'), bias_filler={"type": "constant"})
    net.fc_add1 = caffe.layers.InnerProduct(net.fc1, num_output=500, weight_filler=dict(type='xavier'), bias_filler={"type": "constant"})  # not strictly needed; an extra layer added as an experiment
    net.fc_add2 = caffe.layers.InnerProduct(net.fc_add1, num_output=500, weight_filler=dict(type='xavier'), bias_filler={"type": "constant"})  # likewise, one more experimental layer
    # activation layer (in place, so its top stays fc_add2)
    net.relu1 = caffe.layers.ReLU(net.fc_add2, in_place=True)
    # dropout layer (also in place)
    net.drop3 = caffe.layers.Dropout(net.fc_add2, in_place=True)
    net.fc2 = caffe.layers.InnerProduct(net.fc_add2, num_output=10, weight_filler=dict(type='xavier'))
    # softmax loss layer
    net.loss = caffe.layers.SoftmaxWithLoss(net.fc2, net.label)
    # the training prototxt does not include an Accuracy layer; the test prototxt needs one
    if include_acc:
        net.acc = caffe.layers.Accuracy(net.fc2, net.label)
    return str(net.to_proto())

def write_net(mean_file, train_proto, train_lmdb, test_proto, val_lmdb):
    # write the training prototxt
    with open(train_proto, 'w') as f:
        f.write(create_net(train_lmdb, mean_file, batch_size=64))
    # write the test prototxt (with the Accuracy layer)
    with open(test_proto, 'w') as f:
        f.write(create_net(val_lmdb, mean_file, batch_size=100, include_acc=True))

def write_solver(my_project_root, solver_proto, train_proto, test_proto):
    solver_param = caffe.proto.caffe_pb2.SolverParameter()                  # solver parameter container
    solver_param.train_net = train_proto                                    # path to train.prototxt
    solver_param.test_net.append(test_proto)                                # path to test.prototxt
    solver_param.test_iter.append(100)                                      # 10000/100: iterations per test pass, covering the whole test set
    solver_param.test_interval = 938                                        # 60000/64: run a test pass every 938 training iterations (one epoch)
    solver_param.base_lr = 0.01                                             # base learning rate
    solver_param.momentum = 0.9                                             # momentum
    solver_param.weight_decay = 5e-4                                        # weight decay
    solver_param.lr_policy = 'step'                                         # learning rate policy
    solver_param.stepsize = 3000                                            # drop the learning rate every 3000 iterations
    solver_param.gamma = 0.1                                                # learning rate decay factor
    solver_param.display = 20                                               # print progress every 20 iterations
    solver_param.max_iter = 9380                                            # 938*10: train for 10 epochs
    solver_param.snapshot = 938                                             # snapshot the model every 938 iterations
    solver_param.snapshot_prefix = my_project_root + 'mnist/model/mnist'    # snapshot path prefix
    solver_param.solver_mode = caffe.proto.caffe_pb2.SolverParameter.GPU    # solver mode

    with open(solver_proto, 'w') as f:
        f.write(str(solver_param))


#def train(solver_proto):
#    caffe.set_device(0)
#    caffe.set_mode_gpu()
#    solver = caffe.SGDSolver(solver_proto)
#    solver.solve()


if __name__ == '__main__':
    my_project_root = "F:/python/make_prototxt/"    # project root directory
    train_lmdb = my_project_root + "mnist/data/mnist_train_lmdb"        # location of the training LMDB
    val_lmdb = my_project_root + "mnist/data/mnist_test_lmdb"           # location of the validation LMDB
    train_proto = my_project_root + "mnist/train.prototxt"              # where to save train.prototxt
    test_proto = my_project_root + "mnist/test.prototxt"                # where to save test.prototxt
    solver_proto = my_project_root + "mnist/solver.prototxt"            # where to save solver.prototxt
    mean_file = my_project_root + "mnist/data/trainMean.binaryproto"    # location of the mean file

    write_net(mean_file, train_proto, train_lmdb, test_proto, val_lmdb)
    print "Generated train.prototxt and test.prototxt"
    write_solver(my_project_root, solver_proto, train_proto, test_proto)
    print "Generated solver.prototxt"
    # train(solver_proto)
    # print "Training finished"

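Before moving on, it can be worth checking that the generated files parse as valid Caffe protobuf text. Below is a minimal sketch under that goal, reusing the file paths from above; text_format is Google's standard protobuf text parser and raises ParseError on malformed input:

# -*- coding: UTF-8 -*-
from google.protobuf import text_format
import caffe

def check_net_proto(path):
    # parse the prototxt back into a NetParameter message;
    # text_format.Merge raises ParseError if the file is malformed
    net_param = caffe.proto.caffe_pb2.NetParameter()
    with open(path) as f:
        text_format.Merge(f.read(), net_param)
    print "%s: %d layers" % (path, len(net_param.layer))

check_net_proto("F:/python/make_prototxt/mnist/train.prototxt")
check_net_proto("F:/python/make_prototxt/mnist/test.prototxt")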
Output:
The generated train.prototxt (note that relu1 and drop3 run in place, so their bottom and top are both fc_add2):

layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  transform_param {
    scale: 0.00390625
    mean_file: "F:/python/make_prototxt/mnist/data/trainMean.binaryproto"
  }
  data_param {
    source: "F:/python/make_prototxt/mnist/data/mnist_train_lmdb"
    batch_size: 64
    backend: LMDB
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 20
    kernel_size: 5
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  convolution_param {
    num_output: 50
    pad: 0
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "fc1"
  type: "InnerProduct"
  bottom: "pool2"
  top: "fc1"
  inner_product_param {
    num_output: 500
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "fc_add1"
  type: "InnerProduct"
  bottom: "fc1"
  top: "fc_add1"
  inner_product_param {
    num_output: 500
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "fc_add2"
  type: "InnerProduct"
  bottom: "fc_add1"
  top: "fc_add2"
  inner_product_param {
    num_output: 500
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "fc_add2"
  top: "fc_add2"
}
layer {
  name: "drop3"
  type: "Dropout"
  bottom: "fc_add2"
  top: "fc_add2"
}
layer {
  name: "fc2"
  type: "InnerProduct"
  bottom: "fc_add2"
  top: "fc2"
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc2"
  bottom: "label"
  top: "loss"
}

The generated test.prototxt (identical to train.prototxt except for the data source, the batch size, and the added Accuracy layer):

layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  transform_param {
    scale: 0.00390625
    mean_file: "F:/python/make_prototxt/mnist/data/trainMean.binaryproto"
  }
  data_param {
    source: "F:/python/make_prototxt/mnist/data/mnist_test_lmdb"
    batch_size: 100
    backend: LMDB
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 20
    kernel_size: 5
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  convolution_param {
    num_output: 50
    pad: 0
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "fc1"
  type: "InnerProduct"
  bottom: "pool2"
  top: "fc1"
  inner_product_param {
    num_output: 500
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "fc_add1"
  type: "InnerProduct"
  bottom: "fc1"
  top: "fc_add1"
  inner_product_param {
    num_output: 500
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "fc_add2"
  type: "InnerProduct"
  bottom: "fc_add1"
  top: "fc_add2"
  inner_product_param {
    num_output: 500
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "fc_add2"
  top: "fc_add2"
}
layer {
  name: "drop3"
  type: "Dropout"
  bottom: "fc_add2"
  top: "fc_add2"
}
layer {
  name: "fc2"
  type: "InnerProduct"
  bottom: "fc_add2"
  top: "fc2"
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc2"
  bottom: "label"
  top: "loss"
}
layer {
  name: "acc"
  type: "Accuracy"
  bottom: "fc2"
  bottom: "label"
  top: "acc"
}

The generated solver.prototxt:

train_net: "F:/python/make_prototxt/mnist/train.prototxt"
test_net: "F:/python/make_prototxt/mnist/test.prototxt"
test_iter: 100
test_interval: 938
base_lr: 0.01
display: 20
max_iter: 9380
lr_policy: "step"
gamma: 0.1
momentum: 0.9
weight_decay: 0.0005
stepsize: 3000
snapshot: 938
snapshot_prefix: "F:/python/make_prototxt/mnist/model/mnist"
solver_mode: GPU
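
The iteration counts in this solver follow directly from the MNIST dataset sizes (60,000 training and 10,000 test images) and the batch sizes chosen in write_net; a quick check of the arithmetic:

import math

train_size, test_size = 60000, 10000    # standard MNIST split
train_batch, test_batch = 64, 100       # batch sizes used in write_net

test_iter = test_size // test_batch                                 # 100 iterations cover the full test set
iters_per_epoch = int(math.ceil(train_size / float(train_batch)))   # ceil(60000/64) = 938
max_iter = iters_per_epoch * 10                                     # 9380: roughly 10 epochs

print test_iter, iters_per_epoch, max_iter                          # 100 938 9380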

With these three files in place, you can start training; a sketch follows.
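
The commented-out train() function in the script does this already; below is a slightly expanded sketch, assuming a GPU build of pycaffe and the solver path used above, that steps the solver manually and prints the training loss as it goes:

import caffe

caffe.set_device(0)                     # use GPU 0
caffe.set_mode_gpu()
solver = caffe.SGDSolver("F:/python/make_prototxt/mnist/solver.prototxt")

for it in range(9380):                  # max_iter from solver.prototxt
    solver.step(1)                      # one forward/backward pass plus a weight update
    if it % 100 == 0:
        # 'loss' is the top blob of the SoftmaxWithLoss layer defined earlier
        print "iter %d, loss = %f" % (it, solver.net.blobs['loss'].data)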
