Caffe: Writing and Using Python Layers


Example 1 (reference: http://blog.csdn.net/eagelangel/article/details/52073407)

Define a model in Python

It is also possible to define the net model directly in Python and save it to a prototxt file. Here is the code:

import caffe
from caffe import layers as L
from caffe import params as P

def lenet(lmdb, batch_size):
    # our version of LeNet: a series of linear and simple nonlinear transformations
    n = caffe.NetSpec()
    n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
                             transform_param=dict(scale=1./255), ntop=2)
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.ip1 = L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1, num_output=10, weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()

with open('examples/mnist/lenet_auto_train.prototxt', 'w') as f:
    f.write(str(lenet('examples/mnist/mnist_train_lmdb', 64)))

with open('examples/mnist/lenet_auto_test.prototxt', 'w') as f:
    f.write(str(lenet('examples/mnist/mnist_test_lmdb', 100)))

Create your custom python layer

layer {
  name: 'MyPythonLayer'
  type: 'Python'
  top: 'output'
  bottom: 'conv'
  python_param {
    module: 'mypythonlayer'
    layer: 'MyLayer'
    param_str: "'num': 21"
  }
}
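
The same layer can also be emitted from Python with NetSpec instead of writing the prototxt by hand. The sketch below generates the conv.prototxt that the test snippet further down loads; the Input shape and the convolution settings are assumptions, and it relies on NetSpec routing the module/layer/param_str keywords into python_param:

import caffe
from caffe import layers as L

n = caffe.NetSpec()
# dummy input blob; it is reshaped to the real image size at run time
n.data = L.Input(shape=dict(dim=[1, 1, 100, 100]))
n.conv = L.Convolution(n.data, kernel_size=3, num_output=3, pad=1,
                       weight_filler=dict(type='xavier'))
# the custom layer: module, layer and param_str end up inside python_param
n.output = L.Python(n.conv, module='mypythonlayer', layer='MyLayer',
                    param_str="'num': 21")

with open('conv.prototxt', 'w') as f:
    f.write(str(n.to_proto()))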

Then create a mypythonlayer.py file; it has to be in the current directory or on the PYTHONPATH:

import caffe
import numpy as np
import yaml

class MyLayer(caffe.Layer):

    def setup(self, bottom, top):
        self.num = yaml.load(self.param_str)["num"]
        print "Parameter num : ", self.num

    def reshape(self, bottom, top):
        pass

    def forward(self, bottom, top):
        top[0].reshape(*bottom[0].shape)
        top[0].data[...] = bottom[0].data + self.num

    def backward(self, top, propagate_down, bottom):
        pass

This layer simply adds the value num to every element of its input. To try it, load a net that contains the layer (for instance the conv.prototxt sketched above) and run a forward pass:

import numpy as np
import caffe
from PIL import Image

net = caffe.Net('conv.prototxt', caffe.TEST)
im = np.array(Image.open('cat_gray.jpg'))
im_input = im[np.newaxis, np.newaxis, :, :]
net.blobs['data'].reshape(*im_input.shape)
net.blobs['data'].data[...] = im_input
net.forward()
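
The backward above is a no-op, which is enough for this forward-only test. If the layer sat inside a network being trained it would have to propagate gradients; since it only adds a constant, d(output)/d(input) = 1 and backward would simply copy the top diff (a sketch):

    def backward(self, top, propagate_down, bottom):
        # adding a constant has derivative 1: the gradient passes through unchanged
        if propagate_down[0]:
            bottom[0].diff[...] = top[0].diff
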
Example 2 (reference: https://www.2cto.com/kf/201604/503663.html)


pascal_multilabel_datalayers.py

# imports
import json
import time
import pickle
import scipy.misc
import scipy.sparse
import skimage.io
import caffe
 
import numpy as np
import os.path as osp
 
from xml.dom import minidom
from random import shuffle
from threading import Thread
from PIL import Image
from tools import SimpleTransformer
 
class PascalMultilabelDataLayerSync(caffe.Layer):

    """
    This is a simple synchronous datalayer for training a multilabel model on
    PASCAL.
    """

    def setup(self, bottom, top):
 
        self.top_names = ['data', 'label']
 
        # === Read input parameters ===
 
        # params is a python dictionary with layer parameters.
        params = eval(self.param_str)
 
        # Check the parameters for validity.
        check_params(params)
 
        # store input as class variables
        self.batch_size = params['batch_size']
 
        # Create a batch loader to load the images.
        self.batch_loader = BatchLoader(params, None)
 
        # === reshape tops ===
        # since we use a fixed input image size, we can shape the data layer
        # once. Else, we'd have to do it in the reshape call.
        top[0].reshape(
            self.batch_size, 3, params['im_shape'][0], params['im_shape'][1])
        # Note the 20 channels (because PASCAL has 20 classes.)
        top[1].reshape(self.batch_size, 20)
 
        print_info("PascalMultilabelDataLayerSync", params)
 
    def forward(self, bottom, top):
        """
        Load data.
        """
        for itt in range(self.batch_size):
            # Use the batch loader to load the next image.
            im, multilabel = self.batch_loader.load_next_image()
 
            # Add directly to the caffe data layer
            top[0].data[itt, ...] = im
            top[1].data[itt, ...] = multilabel
 
    def reshape(self, bottom, top):
        """
        There is no need to reshape the data, since the input is of fixed size
        (rows and columns)
        """
        pass
 
    def backward(self, top, propagate_down, bottom):
        """
        This layer does not back propagate.
        """
        pass
 
 
class BatchLoader(object): 
    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
    performed.
    """
 
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.pascal_root = params['pascal_root']
        self.im_shape = params['im_shape']
        # get list of image indexes.
        list_file = params['split'] + '.txt'
        self.indexlist = [line.rstrip('\n') for line in open(
            osp.join(self.pascal_root, 'ImageSets/Main', list_file))]
        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()
 
        print "BatchLoader initialized with {} images".format(
            len(self.indexlist))
 
    def load_next_image(self):
        """
        Load the next image in a batch.
        """
        # Did we finish an epoch?
        if self._cur == len(self.indexlist):
            self._cur = 0
            shuffle(self.indexlist)
 
        # Load an image
        index = self.indexlist[self._cur]  # Get the image index
        image_file_name = index + '.jpg'
        im = np.asarray(Image.open(
            osp.join(self.pascal_root, 'JPEGImages', image_file_name)))
        im = scipy.misc.imresize(im, self.im_shape)  # resize
 
        # do a simple horizontal flip as data augmentation
        flip = np.random.choice(2)*2-1
        im = im[:, ::flip, :]
 
        # Load and prepare ground truth
        multilabel = np.zeros(20).astype(np.float32)
        anns = load_pascal_annotation(index, self.pascal_root)
        for label in anns['gt_classes']:
            # in the multilabel problem we don't care how MANY instances
            # there are of each class. Only if they are present.
            # The "-1" is b/c we are not interested in the background
            # class.
            multilabel[label - 1] = 1
 
        self._cur += 1
        return self.transformer.preprocess(im), multilabel
 
 
def load_pascal_annotation(index, pascal_root):
    """
    This code is borrowed from Ross Girshick's FAST-RCNN code
    (https://github.com/rbgirshick/fast-rcnn).
    It parses the PASCAL .xml metadata files.
    See publication for further details: (https://arxiv.org/abs/1504.08083).
    Thanks Ross!
    """
    classes = ('__background__',  # always index 0
               'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat', 'chair',
               'cow', 'diningtable', 'dog', 'horse',
               'motorbike', 'person', 'pottedplant',
               'sheep', 'sofa', 'train', 'tvmonitor')
    class_to_ind = dict(zip(classes, xrange(21)))
 
    filename = osp.join(pascal_root, 'Annotations', index + '.xml')
    # print 'Loading: {}'.format(filename)
 
    def get_data_from_tag(node, tag):
        return node.getElementsByTagName(tag)[0].childNodes[0].data
 
    with open(filename) as f:
        data = minidom.parseString(f.read())
 
    objs = data.getElementsByTagName('object')
    num_objs = len(objs)
 
    boxes = np.zeros((num_objs, 4), dtype=np.uint16)
    gt_classes = np.zeros((num_objs), dtype=np.int32)
    overlaps = np.zeros((num_objs, 21), dtype=np.float32)
 
    # Load object bounding boxes into a data frame.
    for ix, obj in enumerate(objs):
        # Make pixel indexes 0-based
        x1 = float(get_data_from_tag(obj, 'xmin')) - 1
        y1 = float(get_data_from_tag(obj, 'ymin')) - 1
        x2 = float(get_data_from_tag(obj, 'xmax')) - 1
        y2 = float(get_data_from_tag(obj, 'ymax')) - 1
        cls = class_to_ind[
            str(get_data_from_tag(obj, "name")).lower().strip()]
        boxes[ix, :] = [x1, y1, x2, y2]
        gt_classes[ix] = cls
        overlaps[ix, cls] = 1.0
 
    overlaps = scipy.sparse.csr_matrix(overlaps)
 
    return {'boxes': boxes,
            'gt_classes': gt_classes,
            'gt_overlaps': overlaps,
            'flipped': False,
            'index': index}
 
 
def check_params(params):
    """
    A utility function to check the parameters for the data layers.
    """
    assert 'split' in params.keys(
    ), 'Params must include split (train, val, or test).'
 
    required = ['batch_size', 'pascal_root', 'im_shape']
    for r in required:
        assert r in params.keys(), 'Params must include {}'.format(r)
 
 
def print_info(name, params):
    """
    Output some info regarding the class.
    """
    print "{} initialized for split: {}, with bs: {}, im_shape: {}.".format(
        name,
        params['split'],
        params['batch_size'],
        params['im_shape'])
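
The file above only defines the layer; a network prototxt still has to reference it as a Python layer whose param_str eval()s to a dict with the keys that check_params() expects. A sketch using NetSpec (the PASCAL root path, image shape and batch size are placeholders):

import caffe
from caffe import layers as L

def multilabel_data(split, batch_size):
    n = caffe.NetSpec()
    # param_str is eval()'d inside setup(), so it must be a Python dict literal
    param_str = str(dict(batch_size=batch_size, im_shape=[227, 227], split=split,
                         pascal_root='/path/to/VOCdevkit/VOC2007'))
    n.data, n.label = L.Python(module='pascal_multilabel_datalayers',
                               layer='PascalMultilabelDataLayerSync',
                               param_str=param_str, ntop=2)
    # ... the rest of the model goes here (e.g. the caffenet() body below,
    # ending in a SigmoidCrossEntropyLoss over the 20 label channels)
    return n.to_proto()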

caffenet.py

from __future__ import print_function

from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2
 
# helper function for common structures
 
def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                                num_output=nout, pad=pad, group=group)
    return conv, L.ReLU(conv, in_place=True)
 
def fc_relu(bottom, nout):
    fc = L.InnerProduct(bottom, num_output=nout)
    return fc, L.ReLU(fc, in_place=True)
 
def max_pool(bottom, ks, stride=1):
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)
 
def caffenet(lmdb, batch_size=256, include_acc=False):
    data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True))
 
    # the net itself
    conv1, relu1 = conv_relu(data, 11, 96, stride=4)
    pool1 = max_pool(relu1, 3, stride=2)
    norm1 = L.LRN(pool1, local_size=5, alpha=1e-4, beta=0.75)
    conv2, relu2 = conv_relu(norm1, 5, 256, pad=2, group=2)
    pool2 = max_pool(relu2, 3, stride=2)
    norm2 = L.LRN(pool2, local_size=5, alpha=1e-4, beta=0.75)
    conv3, relu3 = conv_relu(norm2, 3, 384, pad=1)
    conv4, relu4 = conv_relu(relu3, 3, 384, pad=1, group=2)
    conv5, relu5 = conv_relu(relu4, 3, 256, pad=1, group=2)
    pool5 = max_pool(relu5, 3, stride=2)
    fc6, relu6 = fc_relu(pool5, 4096)
    drop6 = L.Dropout(relu6, in_place=True)
    fc7, relu7 = fc_relu(drop6, 4096)
    drop7 = L.Dropout(relu7, in_place=True)
    fc8 = L.InnerProduct(drop7, num_output=1000)
    loss = L.SoftmaxWithLoss(fc8, label)
 
    if include_acc:
        acc = L.Accuracy(fc8, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
 
def make_net():
    with open('train.prototxt', 'w') as f:
        print(caffenet('/path/to/caffe-train-lmdb'), file=f)
 
    with open('test.prototxt', 'w') as f:
        print(caffenet('/path/to/caffe-val-lmdb', batch_size=50, include_acc=True), file=f)
 
if __name__ == '__main__':
    make_net()

tools.py

import numpy as np


class SimpleTransformer:

    """
    SimpleTransformer is a simple class for preprocessing and deprocessing
    images for caffe.
    """

    def __init__(self, mean=[128, 128, 128]):
        self.mean = np.array(mean, dtype=np.float32)
        self.scale = 1.0
 
    def set_mean(self, mean):
        """
        Set the mean to subtract for centering the data.
        """
        self.mean = mean
 
    def set_scale(self, scale):
        """
        Set the data scaling.
        """
        self.scale = scale
 
    def preprocess(self, im):
        """
        preprocess() emulates the pre-processing occurring in the vgg16 caffe
        prototxt.
        """ 
        im = np.float32(im)
        im = im[:, :, ::-1]  # change to BGR
        im -= self.mean
        im *= self.scale
        im = im.transpose((2, 0, 1)) 
        return im
 
    def deprocess(self, im):
        """
        inverse of preprocess()
        """
        im = im.transpose(1, 2, 0)
        im /= self.scale
        im += self.mean
        im = im[:, :, ::-1]  # change to RGB 
        return np.uint8(im)
 
 
class CaffeSolver:
 
    """
    Caffesolver is a class for creating a solver.prototxt file. It sets default
    values and can export a solver parameter file.
    Note that all parameters are stored as strings. Strings variables are
    stored as strings in strings.
    """
 
    def __init__(self, testnet_prototxt_path="testnet.prototxt",
                 trainnet_prototxt_path="trainnet.prototxt", debug=False):
 
        self.sp = {}
 
        # critical:
        self.sp['base_lr'] = '0.001'
        self.sp['momentum'] = '0.9'
 
        # speed:
        self.sp['test_iter'] = '100'
        self.sp['test_interval'] = '250'
 
        # looks:
        self.sp['display'] = '25'
        self.sp['snapshot'] = '2500'
        self.sp['snapshot_prefix'] = '"snapshot"'  # string within a string!
 
        # learning rate policy
        self.sp['lr_policy'] = '"fixed"'
 
        # important, but rare:
        self.sp['gamma'] = '0.1'
        self.sp['weight_decay'] = '0.0005'
        self.sp['train_net'] = '"' + trainnet_prototxt_path + '"'
        self.sp['test_net'] = '"' + testnet_prototxt_path + '"'
 
        # pretty much never change these.
        self.sp['max_iter'] = '100000'
        self.sp['test_initialization'] = 'false'
        self.sp['average_loss'] = '25'  # this has to do with the display.
        self.sp['iter_size'] = '1'  # this is for accumulating gradients
 
        if (debug):
            self.sp['max_iter'] = '12'
            self.sp['test_iter'] = '1'
            self.sp['test_interval'] = '4'
            self.sp['display'] = '1'
 
    def add_from_file(self, filepath):
        """
        Reads a caffe solver prototxt file and updates the Caffesolver
        instance parameters.
        """
        with open(filepath, 'r') as f:
            for line in f:
                if line[0] == '#':
                    continue
                splitLine = line.split(':')
                self.sp[splitLine[0].strip()] = splitLine[1].strip()
 
    def write(self, filepath):
        """
        Export solver parameters to INPUT "filepath". Sorted alphabetically.
        """
        with open(filepath, 'w') as f:
            for key, value in sorted(self.sp.items()):
                if not(type(value) is str):
                    raise TypeError('All solver parameters must be strings')
                f.write('%s: %s\n' % (key, value))
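
A quick usage sketch for CaffeSolver (the prototxt file names are placeholders); note that every value has to be stored as a string and that write() emits the parameters sorted by key:

from tools import CaffeSolver

solver = CaffeSolver(trainnet_prototxt_path='train.prototxt',
                     testnet_prototxt_path='test.prototxt')
solver.sp['base_lr'] = '0.01'   # values are strings, even the numeric ones
solver.write('solver.prototxt')

The resulting solver.prototxt can then be loaded with caffe.SGDSolver('solver.prototxt') or passed to the caffe command-line tool.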


