caffe入门基础篇

目录

一、caffe配置文件介绍

二、标准层的定义

 三、网络微调技巧

四、Linux脚本使用及LMDB文件生成

五、带你设计一个Caffe网络,用于分类任务


一、caffe配置文件介绍

 

二、标准层的定义

 

 三、网络微调技巧

 

其中,multistep最为常用

四、Linux脚本使用及LMDB文件生成

五、带你设计一个Caffe网络,用于分类任务

下面通过代码示例依次演示:

使用pycaffe生成solver配置

使用pycaffe生成caffe测试网络和训练网络

 

数据集下载

# demoCaffe

数据集下载,cifar mnist:
百度云盘:

链接: https://pan.baidu.com/s/1bHFQUz7Q6BMBZv25AhsXKQ 密码: dva9
链接: https://pan.baidu.com/s/1rPRjf2hanlYYjBQQDmIjNQ 密码: 5nhv

1. lmdb数据制作:

手动实现: https://blog.csdn.net/yx2017/article/details/72953537   

               https://www.jianshu.com/p/9d7ed35960cb

代码实现:https://www.cnblogs.com/leemo-o/p/4990021.html

                 https://www.jianshu.com/p/ef84715e0fdc

以下仅供对比阅读:

demo_lmdb.py:  生成lmdb格式数据

 

import lmdb
import numpy as np
import cv2
import caffe
from caffe.proto import caffe_pb2

def write():
    """Create a demo LMDB database ('lmdb_data') holding 256 dummy samples.

    Each sample is a 3x64x64 uint8 array of ones, stored as a Caffe Datum
    whose label is the sample index.  Keys are zero-padded 8-digit strings
    so lexicographic key order matches insertion order.
    """
    lmdb_file = 'lmdb_data'
    num_samples = 256  # total number of records to write (was misleadingly named batch_size)

    # Environment context manager closes the DB handle on exit; the
    # transaction context manager commits on success and aborts on error,
    # so no explicit commit()/close() calls are needed.
    with lmdb.open(lmdb_file, map_size=int(1e12)) as lmdb_env:
        with lmdb_env.begin(write=True) as lmdb_txn:
            for x in range(num_samples):
                data = np.ones((3, 64, 64), np.uint8)
                label = x

                datum = caffe.io.array_to_datum(data, label)
                keystr = "{:0>8d}".format(x)

                # LMDB keys must be bytes under Python 3; passing a bare
                # str raises TypeError in py-lmdb.
                lmdb_txn.put(keystr.encode('ascii'), datum.SerializeToString())

def read():
    """Iterate over the demo LMDB and print each record's label and data array."""
    lmdb_env = lmdb.open('lmdb_data')
    try:
        lmdb_txt = lmdb_env.begin()  # read-only transaction

        # One Datum message is reused and re-parsed for every record.
        datum = caffe_pb2.Datum()

        for key, value in lmdb_txt.cursor():
            datum.ParseFromString(value)

            label = datum.label
            data = caffe.io.datum_to_array(datum)

            print(label)
            print(data)
    finally:
        # The original version leaked the environment handle.
        lmdb_env.close()


# Build the demo LMDB, then read it back to verify the stored contents.
if __name__ == '__main__':
    write()
    read()

demo_create_solver.py:  生成solver配置文件

from caffe.proto import caffe_pb2

# Build a SolverParameter message field by field and dump it as prototxt.
solver = caffe_pb2.SolverParameter()

# Networks: a single training net plus a (repeated) list of test nets.
solver.train_net = "train.prototxt"
solver.test_net.append("test.prototxt")

# Evaluate every 100 training iterations, running 10 test batches each time.
solver.test_interval = 100
solver.test_iter.append(10)

solver.max_iter = 1000

# Optimisation hyper-parameters.
solver.base_lr = 0.1
solver.weight_decay = 5e-4
solver.lr_policy = "step"
solver.type = "SGD"

# Logging / checkpointing cadence and checkpoint file prefix.
solver.display = 10
solver.snapshot = 10
solver.snapshot_prefix = "model"

solver.solver_mode = caffe_pb2.SolverParameter.GPU

# str() on a protobuf message yields the text (prototxt) serialisation;
# field order in the output follows field numbers, not assignment order.
with open("net/s.prototxt", "w") as f:
    f.write(str(solver))




结果如下(注:以下输出来自另一次运行,test_iter、display、max_iter、snapshot 等数值以及路径与上面脚本中的设置不同,仅供输出格式参考)

train_net: "/home/kuan/PycharmProjects/demo_cnn_net/net/train.prototxt"
test_net: "/home/kuan/PycharmProjects/demo_cnn_net/net/test.prototxt"
test_iter: 1000
test_interval: 100
base_lr: 0.10000000149
display: 100
max_iter: 100000
lr_policy: "step"
weight_decay: 0.000500000023749
snapshot: 100
snapshot_prefix: "/home/kuan/PycharmProjects/demo_cnn_net/cnn_model/mnist/lenet/"
solver_mode: GPU
type: "SGD"

demo_creat_net.py:    创建网络

import caffe

def create_net():
    """Assemble a small LeNet-style classification net with NetSpec and
    write its text definition to net/tt.prototxt."""
    layers = caffe.layers
    params = caffe.params

    net = caffe.NetSpec()

    # Data layer produces two tops (hence ntop=2): data and label.
    # Training-time augmentation: random 40x40 crop plus horizontal mirror.
    net.data, net.label = layers.Data(
        source="data.lmdb",
        backend=params.Data.LMDB,
        batch_size=32,
        ntop=2,
        transform_param=dict(crop_size=40, mirror=True),
    )

    # Conv block 1: 20 filters of 5x5, xavier-initialised, in-place ReLU,
    # then 3x3 stride-2 max pooling.
    net.conv1 = layers.Convolution(
        net.data,
        num_output=20,
        kernel_size=5,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    net.relu1 = layers.ReLU(net.conv1, in_place=True)
    net.pool1 = layers.Pooling(
        net.relu1, pool=params.Pooling.MAX, kernel_size=3, stride=2
    )

    # Conv block 2: 32 filters of 3x3 with padding 1.
    net.conv2 = layers.Convolution(
        net.pool1,
        num_output=32,
        kernel_size=3,
        pad=1,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    net.relu2 = layers.ReLU(net.conv2, in_place=True)
    net.pool2 = layers.Pooling(
        net.relu2, pool=params.Pooling.MAX, kernel_size=3, stride=2
    )

    # Fully-connected head: 1024-unit hidden layer, dropout, 10-way output.
    net.fc3 = layers.InnerProduct(
        net.pool2, num_output=1024, weight_filler=dict(type="xavier")
    )
    net.relu3 = layers.ReLU(net.fc3, in_place=True)
    net.drop = layers.Dropout(net.relu3, dropout_param=dict(dropout_ratio=0.5))
    net.fc4 = layers.InnerProduct(
        net.drop, num_output=10, weight_filler=dict(type="xavier")
    )

    # Softmax + cross-entropy loss over the 10 classes.
    net.loss = layers.SoftmaxWithLoss(net.fc4, net.label)

    with open("net/tt.prototxt", 'w') as f:
        f.write(str(net.to_proto()))


# Generate the network prototxt when run as a script.
if __name__ == '__main__':
    create_net()

生成结果如下

layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  transform_param {
    mirror: true
    crop_size: 40
  }
  data_param {
    source: "/home/kuan/PycharmProjects/demo_cnn_net/lmdb_data"
    batch_size: 32
    backend: LMDB
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 20
    kernel_size: 5
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "fc3"
  type: "InnerProduct"
  bottom: "pool2"
  top: "fc3"
  inner_product_param {
    num_output: 1024
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "fc3"
  top: "fc3"
}
layer {
  name: "drop"
  type: "Dropout"
  bottom: "fc3"
  top: "drop"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc4"
  type: "InnerProduct"
  bottom: "drop"
  top: "fc4"
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc4"
  bottom: "label"
  top: "loss"
}

demo_train.py: 训练网络

# Make the custom Caffe build's Python bindings importable before `import caffe`.
import sys
sys.path.append('/home/kuan/AM-softmax_caffe/python')
import caffe

# Build an SGD solver from the prototxt config; solve() then runs the whole
# training loop (data loading, forward/backward, snapshots) as configured there.
solver = caffe.SGDSolver("/home/kuan/PycharmProjects/demo_cnn_net/cnn_net/alexnet/solver.prototxt")

solver.solve()

demo_test.py:测试网络

# Make the custom Caffe build's Python bindings importable before `import caffe`.
import sys
sys.path.append('/home/kuan/AM-softmax_caffe/python')
import caffe
import numpy as np

# Network definition (deploy.prototxt) and trained weights (.caffemodel).

deploy = "/home/kuan/PycharmProjects/demo_cnn_net/cnn_net/alexnet/deploy.prototxt"

model = "/home/kuan/PycharmProjects/demo_cnn_net/cnn_model/cifar/alexnet/alexnet_iter_110.caffemodel"

# Instantiate the net in TEST phase (inference mode).
net = caffe.Net(deploy, model, caffe.TEST)


# Fill the input blob with a dummy all-ones image.
# NOTE(review): assumes the "data" blob is (N, 3, 32, 32) so this (3, 32, 32)
# array broadcasts over the batch dimension — confirm against deploy.prototxt.
net.blobs["data"].data[...] = np.ones((3,32,32),np.uint8)

net.forward()

# Class probabilities for the first image in the batch.
prob = net.blobs["prob"].data[0]

print(prob)

 

 

 

 

 

 

  • 2
    点赞
  • 11
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值