Building an AlexNet model with TensorFlow: small-scale cat/dog classification (training only)

First, the directory layout:

The data folder contains a model folder, a train folder, and a validation folder. The model folder stores the saved model files; train holds two subfolders of images, cats and dogs; validation is organized the same way as train.

config.py holds the configuration parameters, util.py defines the AlexNet framework, and AlexNet.py trains the model; the remaining files are unused.
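A sketch of the assumed layout (the cats/dogs subfolder names follow from the labeling logic in util.get_file below):

data/
├── model/          # saved checkpoints (AlexNetModel.ckpt)
├── train/
│   ├── cats/       # cat images (label 0)
│   └── dogs/       # dog images (label 1)
└── validation/     # same structure as train/
config.py
util.py
AlexNet.py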

First, define the configuration in config.py: the parameters used by the convolutional and fully connected layers, stored as dictionaries.

The code is as follows:

"""
Configuration file: define the parameters
"""
import tensorflow as tf
learning_rate=1e-4    # SGD learning rate
training_iters=200    # number of training iterations
batch_size=50
display_step=5        # how often to report progress
n_classes=2           # cats vs. dogs
n_fc1=4096            # width of fully connected layer 1
n_fc2=2048            # width of fully connected layer 2

W_conv={'conv1':tf.Variable(tf.truncated_normal([11,11,3,96],stddev=0.0001)),
        'conv2': tf.Variable(tf.truncated_normal([5, 5, 96, 256], stddev=0.01)),
        'conv3': tf.Variable(tf.truncated_normal([3, 3, 256, 384], stddev=0.01)),
        'conv4': tf.Variable(tf.truncated_normal([3, 3, 384, 384], stddev=0.01)),
        'conv5': tf.Variable(tf.truncated_normal([3, 3, 384, 256], stddev=0.01)),
        'fc1': tf.Variable(tf.truncated_normal([6*6*256,n_fc1], stddev=0.1)),
        'fc2': tf.Variable(tf.truncated_normal([n_fc1, n_fc2], stddev=0.1)),
        'fc3': tf.Variable(tf.truncated_normal([n_fc2, n_classes], stddev=0.1))}
b_conv={'conv1':tf.Variable(tf.constant(0.0,shape=[96],dtype=tf.float32)),
        'conv2':tf.Variable(tf.constant(0.1,shape=[256],dtype=tf.float32)),
        'conv3':tf.Variable(tf.constant(0.1,shape=[384],dtype=tf.float32)),
        'conv4':tf.Variable(tf.constant(0.1,shape=[384],dtype=tf.float32)),
        'conv5':tf.Variable(tf.constant(0.1,shape=[256],dtype=tf.float32)),
        'fc1': tf.Variable(tf.constant(0.1,shape=[n_fc1],dtype=tf.float32)),
        'fc2': tf.Variable(tf.constant(0.1,shape=[n_fc2],dtype=tf.float32)),
        'fc3': tf.Variable(tf.constant(0.0,shape=[n_classes],dtype=tf.float32))}
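As a sanity check on the fc1 input size of 6*6*256: a 227×227 input shrinks to 55 after conv1 (11×11, stride 4, VALID), to 27 after pool1, to 13 after pool2, and to 6 after pool5, while the SAME-padded conv2-conv5 preserve spatial size. A minimal sketch (not from the original post) that recomputes this with the VALID-layer formula out = (in - k)//s + 1:

def valid_out(size, k, s):
    # output spatial size of a VALID convolution/pooling layer
    return (size - k) // s + 1

size = valid_out(227, 11, 4)  # conv1 -> 55
size = valid_out(size, 3, 2)  # pool1 -> 27 (conv2 is SAME, keeps 27)
size = valid_out(size, 3, 2)  # pool2 -> 13 (conv3-5 are SAME, keep 13)
size = valid_out(size, 3, 2)  # pool5 -> 6, so fc1 sees 6*6*256 features
print(size)  # 6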

Next, define the AlexNet framework in util.py:

"""
Define the AlexNet framework
"""
import os
import numpy as np
from config import W_conv,b_conv,n_classes,learning_rate
import tensorflow as tf
"""
构建AlexNet模型
"""
"""
对特征图进行归一化 采用标准化处理
"""
def batch_norm(inputs,is_training,is_conv_out=True,decay=0.999):
    scale=tf.Variable(tf.ones([inputs.get_shape()[-1]]))
    beta=tf.Variable(tf.zeros([inputs.get_shape()[-1]]))
    pop_mean = tf.Variable(tf.zeros([inputs.get_shape()[-1]]),trainable=False)
    pop_var = tf.Variable(tf.ones([inputs.get_shape()[-1]]),trainable=False)
    if is_training:
        if is_conv_out:
            # per-channel statistics over batch, height and width for conv outputs
            batch_mean,batch_var=tf.nn.moments(inputs,[0,1,2])
        else:
            # per-feature statistics over the batch for fully connected outputs
            batch_mean, batch_var =tf.nn.moments(inputs, [0])
        #update the moving averages of mean and variance for use at inference time
        train_mean=tf.assign(pop_mean,pop_mean*decay+batch_mean*(1-decay))
        train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))
        with tf.control_dependencies([train_mean,train_var]):
            return tf.nn.batch_normalization(inputs,
                                             batch_mean,batch_var,beta,scale,0.001)
    else:
        return tf.nn.batch_normalization(inputs,
                                         pop_mean, pop_var, beta, scale, 0.001)
def build_cnn(x,y):
    x_image=tf.reshape(x,[-1,227,227,3])
    #conv layer 1
    conv1=tf.nn.relu(tf.nn.conv2d(x_image,W_conv['conv1'],strides=[1,4,4,1],padding='VALID')
                     +b_conv['conv1'])
    conv1=batch_norm(conv1,True)
    #pooling layer 1
    pool1=tf.nn.avg_pool(conv1,ksize=[1,3,3,1],strides=[1,2,2,1],padding='VALID')
    #LRN layer 1
    norm1=tf.nn.lrn(pool1,5,bias=1.0,alpha=0.001/9.0,beta=0.75)

    #conv layer 2
    conv2=tf.nn.relu(tf.nn.conv2d(norm1,W_conv['conv2'],strides=[1,1,1,1],padding='SAME')
                     +b_conv['conv2'])
    conv2 = batch_norm(conv2, True)
    #pooling layer 2
    pool2=tf.nn.avg_pool(conv2,ksize=[1,3,3,1],strides=[1,2,2,1],padding='VALID')
    #LRN layer 2
    norm2=tf.nn.lrn(pool2,5,bias=1.0,alpha=0.001/9.0,beta=0.75)

    #conv layer 3
    conv3=tf.nn.relu(tf.nn.conv2d(norm2,W_conv['conv3'],strides=[1,1,1,1],padding='SAME')
                     +b_conv['conv3'])
    conv3 = batch_norm(conv3, True)
    #conv layer 4
    conv4=tf.nn.relu(tf.nn.conv2d(conv3,W_conv['conv4'],strides=[1,1,1,1],padding='SAME')
                     +b_conv['conv4'])
    conv4 = batch_norm(conv4, True)
    #conv layer 5
    conv5=tf.nn.relu(tf.nn.conv2d(conv4,W_conv['conv5'],strides=[1,1,1,1],padding='SAME')
                     +b_conv['conv5'])
    conv5 = batch_norm(conv5, True)
    #pooling layer 5
    pool5=tf.nn.avg_pool(conv5,ksize=[1,3,3,1],strides=[1,2,2,1],padding='VALID')

    #fully connected layer 1
    fc1=tf.matmul(tf.reshape(pool5, [-1, 6 * 6 * 256]), W_conv['fc1']) + b_conv['fc1']
    fc1 = batch_norm(fc1, True,is_conv_out=False)
    fc1=tf.nn.relu(fc1)
    fc1=tf.nn.dropout(fc1,0.5)
    #fully connected layer 2
    fc2=tf.matmul(fc1, W_conv['fc2']) + b_conv['fc2']
    fc2 = batch_norm(fc2, True, is_conv_out=False)
    fc2=tf.nn.relu(fc2)
    fc2=tf.nn.dropout(fc2,0.5)
    #fully connected layer 3 (logits)
    fc3=tf.matmul(fc2,W_conv['fc3'])+b_conv['fc3']

    #define the loss
    loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=fc3,labels=y))
    optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)
    #evaluate the model
    accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(fc3,1),tf.argmax(y,1)),dtype=tf.float32))
    return loss,optimizer,accuracy
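"""
A quick smoke test (an assumed sketch, not part of the original post): feed one
random batch through the graph to check that all the layer shapes line up.
"""
def smoke_test():
    x = tf.placeholder(tf.float32, [None, 227, 227, 3])
    y = tf.placeholder(tf.float32, [None, n_classes])
    loss, optimizer, accuracy = build_cnn(x, y)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        dummy_x = np.random.rand(4, 227, 227, 3).astype(np.float32)
        dummy_y = np.eye(n_classes)[np.random.randint(0, n_classes, 4)]
        print(sess.run(loss, feed_dict={x: dummy_x, y: dummy_y}))  # a finite scalar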

"""
Read train.tfrecords and return image/label batches. This assumes an already-built .tfrecords file, which is impractical when the dataset is very large.
"""
def read_and_decode(filename,batch_size):
    filename_queue=tf.train.string_input_producer([filename])# read filenames through a queue
    reader=tf.TFRecordReader()
    _,serialized_example=reader.read(filename_queue)#returns the filename and the serialized example
    features=tf.parse_single_example(serialized_example,
                            features={
                                'label':tf.FixedLenFeature([],tf.int64),#types must match those used when writing
                                'image':tf.FixedLenFeature([],tf.string)
                            })
    img=tf.decode_raw(features['image'],tf.uint8)
    img=tf.reshape(img,shape=[227,227,3])
    #img = tf.cast(img, dtype=tf.float32) * (1.0 / 128) - 0.5
    label = tf.cast(features['label'], dtype=tf.int32)
    img_batch, label_batch = tf.train.shuffle_batch([img, label], batch_size=batch_size,
                                                    capacity=64, min_after_dequeue=32,
                                                    num_threads=64)
    return img_batch,tf.reshape(label_batch,[batch_size])
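"""
For reference, an assumed sketch (not in the original post) of how such a
train.tfrecords file could be written from the image/label lists: each image is
resized to 227x227 and stored as raw uint8 bytes, so the decode_raw/reshape
calls above can recover it. Assumes RGB images and requires Pillow.
"""
def convert_to_tfrecord(image_list, label_list, filename):
    from PIL import Image  # hypothetical dependency, only needed for this helper
    writer = tf.python_io.TFRecordWriter(filename)
    for path, label in zip(image_list, label_list):
        img = np.array(Image.open(path).resize((227, 227)), dtype=np.uint8)
        example = tf.train.Example(features=tf.train.Features(feature={
            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),
            'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tobytes()]))
        }))
        writer.write(example.SerializeToString())
    writer.close()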
"""
Collect the image paths and their labels into lists and return them
"""
def get_file(dir):
    images = []
    temp = []
    for root,dirs,files in os.walk(dir):
        for name in files:
            images.append(os.path.join(root,name))
        for name in dirs:#the class subfolders (cats, dogs)
            temp.append(os.path.join(root,name))

    labels=[]
    for one_folder in temp:
        n_img=len(os.listdir(one_folder))#number of images in the cats or dogs folder
        letter=one_folder.split('/')[-1]
        if letter=='cats':
            labels=np.append(labels,n_img*[0])#np.append concatenates; 0 = cat, 1 = dog
        else:
            labels=np.append(labels,n_img*[1])
    #shuffle images and labels together
    temp=np.array([images,labels])
    temp=temp.transpose()
    np.random.shuffle(temp)
    image_list=list(temp[:,0])
    label_list=list(temp[:,1])
    label_list=[int(float(i)) for i in label_list]
    return image_list,label_list
"""
A single huge TFRecord file eats memory; this method instead batches images straight from their file paths, loading only as many as needed
"""
def get_batch(image_list,label_list,img_width,img_height,batch_size,capacity):
    image=tf.cast(image_list,dtype=tf.string)
    label=tf.cast(label_list, dtype=tf.int32)
    input_queue=tf.train.slice_input_producer([image,label])
    label=input_queue[1]
    image_contents=tf.read_file(input_queue[0])
    image=tf.image.decode_jpeg(image_contents,channels=3)
    image=tf.image.resize_image_with_crop_or_pad(image,img_width,img_height)
    image=tf.image.per_image_standardization(image)#standardize each image to zero mean, unit variance
    image_batch,label_batch=tf.train.batch([image,label],batch_size=batch_size,num_threads=64,capacity=capacity)
    label_batch=tf.reshape(label_batch,[batch_size])
    return image_batch,label_batch
"""
实现标签的one-hot
"""
# #转换one-hot
def one_hot(label):
    n_classes=max(label)+1
    #row i of the identity matrix is the one-hot vector for class i
    label = np.eye(n_classes)[label.reshape(-1)]
    return label
# An equivalent alternative:
# def one_hot(labels):
#     n_samples=len(labels)
#     n_class=max(labels)+1
#     onehot_label=np.zeros((n_samples,n_class))
#     onehot_label[np.arange(n_samples),labels]=1
#     return onehot_label
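A quick check of one_hot with assumed example values:

import numpy as np
from util import one_hot
print(one_hot(np.array([0, 1, 1, 0])))
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]
#  [1. 0.]]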

Finally, call everything from AlexNet.py:

import tensorflow as tf
import util
import config
import time
import matplotlib.pyplot as plt
"""
Option 1: read the whole pre-built train.tfrecords directly
"""
#filename='./data/train/train.tfrecords'
#img_batch,label_batch=util.read_and_decode(filename,batch_size=32)
"""
Option 2: read images on demand from their file paths
"""
dir='./data/train'
image_list,label_list=util.get_file(dir)
img_batch,label_batch=util.get_batch(image_list,label_list,227,227,batch_size=32,capacity=64)
x=tf.placeholder(shape=[None,227,227,3],dtype=tf.float32)
y=tf.placeholder(shape=[None,config.n_classes],dtype=tf.float32)
loss,optimizer,accuracy=util.build_cnn(x,y)
def train(epoch):
    init=tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        costs=[]
        start_time=time.time()
        save_model='./data/model/AlexNetModel.ckpt'
        train_writer=tf.summary.FileWriter('./log',sess.graph)
        #start the input queue threads, with a coordinator for a clean shutdown
        coord=tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess,coord=coord)
        for i in range(epoch):
            image,label=sess.run([img_batch,label_batch])
            labels = util.one_hot(label)
            _,cost,train_accuracy=sess.run([optimizer,loss,accuracy],feed_dict={x:image,y:labels})
            print('step={},loss={},train_accuracy={}'.format(i,cost,train_accuracy))
            costs.append(cost)
            end_time=time.time()
            print('step={},time={}'.format(i,(end_time-start_time)))
            print('')
        print('optimization finished')
        saver.save(sess,save_model)
        print('model saved')
        coord.request_stop()
        coord.join(threads)
        plt.plot(costs)
        #set the axis labels before plt.show(), otherwise they are not drawn
        plt.xlabel('iter')
        plt.ylabel('cost')
        plt.show()
if __name__ == '__main__':
    train(epoch=50)
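After training, the saved checkpoint could be restored in a separate script for further use; a minimal sketch (assumed, not part of the original post):

import tensorflow as tf
import util
import config

x = tf.placeholder(shape=[None, 227, 227, 3], dtype=tf.float32)
y = tf.placeholder(shape=[None, config.n_classes], dtype=tf.float32)
loss, optimizer, accuracy = util.build_cnn(x, y)  # rebuild the same graph
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, './data/model/AlexNetModel.ckpt')  # load the trained weights
    # ...feed validation batches through `accuracy` here...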


Printed loss values (screenshots omitted):

Without the feature normalization.

With batch normalization added: the loss is clearly reduced.

Because the pre-built train.tfrecords dataset was too large, the code was later changed to read only as many images as needed instead of one huge TFRecord file.
