AlexNet+cifar(tensorflow)

As titled (just keeping this as a personal record).

[Code from](https://github.com/xi-mao/alexnet-cifar-10)

alexnet.py

#coding=utf-8
import math
import tensorflow as tf

def print_activations(t):
    print(t.op.name,'',t.get_shape().as_list())   #get_shape() returns a TensorShape; as_list() turns it into a plain list of dimension sizes (note the call parentheses, which the original was missing)

def model():
    _IMAGE_SIZE=32
    _IMAGE_CHANNELS=3
    _RESHAPE_SIZE=3*3*128
    _NUM_CLASSES=10
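    #why _RESHAPE_SIZE is 3*3*128: each 3x3/stride-2 VALID max-pool below maps a
    #side of n to floor((n-3)/2)+1, so 32 -> 15 (pool1) -> 7 (pool2) -> 3 (pool5),
    #and conv5 has 128 channels, giving 3*3*128 = 1152 flattened features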

    parameters=[]   #note: never used below
    with tf.name_scope('data'):
        x=tf.placeholder(tf.float32,shape=[None,_IMAGE_SIZE*_IMAGE_SIZE*_IMAGE_CHANNELS],name='images')
        y=tf.placeholder(tf.float32,shape=[None,_NUM_CLASSES],name='Output')
        images=tf.reshape(x,[-1,_IMAGE_SIZE,_IMAGE_SIZE,_IMAGE_CHANNELS],name='images')
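        #note: this reshape assumes the 3072 values per image are laid out
        #height*width*channels (channels last); raw CIFAR batches store the three
        #colour planes separately, so the data loader has to reorder them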
        print(images) 
    #conv1
    #name_scope just prefixes op names so the graph groups cleanly in TensorBoard;
    #it does not implement variable sharing (that is variable_scope with
    #tf.get_variable). If a tf.Variable name collides, TF simply uniquifies it.
    with tf.name_scope('conv1') as scope:          
        kernel=tf.Variable(tf.truncated_normal([5,5,3,64],dtype=tf.float32,
                                        stddev=1e-1),name='weights')
        #strides [1,1,1,1] = [batch, height, width, channels]: how far the window
        #moves per step in each dimension (batch and channel strides must be 1)
        #padding='SAME' zero-pads the borders, so at stride 1 the output keeps the input size
        conv=tf.nn.conv2d(images,kernel,[1,1,1,1],padding='SAME')
        biases=tf.Variable(tf.constant(0.0,shape=[64],dtype=tf.float32),
                                        trainable=True,name='bias')
        bias=tf.nn.bias_add(conv,biases)
        conv1=tf.nn.relu(bias,name=scope)    #returns a Tensor; name=scope passes the string 'conv1/', so the ReLU op is named exactly 'conv1' instead of getting an auto-generated name
        print_activations(conv1)
    tf.summary.histogram('Convolution_layers/conv1',conv1)
    tf.summary.scalar('Convolution_layers/conv1',tf.nn.zero_fraction(conv1))
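    #tf.nn.zero_fraction logs the fraction of activations that are exactly zero,
    #a quick way to spot dead ReLU units in TensorBoard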


    with tf.name_scope('lrn1') as scope:
        lrn1=tf.nn.local_response_normalization(conv1,
                                                alpha=1e-4,
                                                beta=0.75,
                                                depth_radius=2,
                                                bias=2.0)   #local response normalization (LRN)
    #pool1
    pool1=tf.nn.max_pool(lrn1,ksize=[1,3,3,1],strides=[1,2,2,1],
                    padding='VALID',name='pool1')   #3x3 max pooling, stride 2

    print_activations(pool1)
    #order check: the original AlexNet paper also normalizes before pooling
    #(conv -> ReLU -> LRN -> max-pool), so LRN first is actually consistent with it

    #conv2, same pattern as conv1
    with tf.name_scope('conv2') as scope:
        kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 64], dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                                                 trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope)
    tf.summary.histogram('Convolution_layers/conv2',conv2)
    tf.summary.scalar('Convolution_layers/conv2',tf.nn.zero_fraction(conv2))
    print_activations(conv2)
    #lrn2
    with tf.name_scope('lrn2') as scope:
        lrn2 = tf.nn.local_response_normalization(conv2,alpha=1e-4,beta=0.75,
                                                depth_radius=2, bias=2.0)
    # pool2
    pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1],strides=[1, 2, 2, 1],
                            padding='VALID',name='pool2')
    print_activations(pool2)

    #conv3: no LRN or pooling after this layer
    with tf.name_scope('conv3') as scope:
        kernel =tf.Variable(tf.truncated_normal([3,3,64,128],dtype=tf.float32,
                                                stddev=1e-1),name='weights')
        conv=tf.nn.conv2d(pool2,kernel,[1,1,1,1],padding='SAME')
        biases=tf.Variable(tf.constant(0.0,shape=[128],dtype=tf.float32),
                                        trainable=True,name='biases')
        bias=tf.nn.bias_add(conv,biases)
        conv3=tf.nn.relu(bias,name=scope)
        print_activations(conv3)
    tf.summary.histogram('Convolution_layers/conv3',conv3)
    tf.summary.scalar('Convolution_layers/conv3',tf.nn.zero_fraction(conv3))
    #conv4, same as conv3
    with tf.name_scope('conv4') as scope:
        kernel =tf.Variable(tf.truncated_normal([3,3,128,128],dtype=tf.float32,
                                                stddev=1e-1),name='weights')
        conv=tf.nn.conv2d(conv3,kernel,[1,1,1,1],padding='SAME')
        biases=tf.Variable(tf.constant(0.0,shape=[128],dtype=tf.float32),
                                        trainable=True,name='biases')
        bias=tf.nn.bias_add(conv,biases)
        conv4=tf.nn.relu(bias,name=scope)
        print_activations(conv4)
    tf.summary.histogram('Convolution_layers/conv4',conv4)
    tf.summary.scalar('Convolution_layers/conv4',tf.nn.zero_fraction(conv4))
    #conv5
    with tf.name_scope('conv5') as scope:
        kernel =tf.Variable(tf.truncated_normal([3,3,128,128],dtype=tf.float32,
                                                stddev=1e-1),name='weights')
        conv=tf.nn.conv2d(conv4,kernel,[1,1,1,1],padding='SAME')
        biases=tf.Variable(tf.constant(0.0,shape=[128],dtype=tf.float32),
                                        trainable=True,name='biases')
        bias=tf.nn.bias_add(conv,biases)
        conv5=tf.nn.relu(bias,name=scope)
        print_activations(conv5)
    tf.summary.histogram('Convolution_layers/conv5',conv5)
    tf.summary.scalar('Convolution_layers/conv5',tf.nn.zero_fraction(conv5))

    #pool5
    pool5=tf.nn.max_pool(conv5,ksize=[1,3,3,1],strides=[1,2,2,1],
                            padding='VALID',name='pool5')
    print_activations(pool5)

    #fully_connected1
    with tf.name_scope('fully_connected1') as scope:
        reshape=tf.reshape(pool5,[-1,_RESHAPE_SIZE])
        dim=reshape.get_shape()[1].value
        weights =tf.Variable(tf.truncated_normal([dim,384],dtype=tf.float32,
                                                stddev=1e-1),name='weights')
        print_activations(weights)
        biases=tf.Variable(tf.constant(0.0,shape=[384],dtype=tf.float32),
                                        trainable=True,name='biases')
        local3=tf.nn.relu(tf.matmul(reshape,weights)+biases,name=scope)
        print_activations(local3)
    tf.summary.histogram('Fully_connected_layers/fc1',local3)
    tf.summary.scalar('Fully_connected_layers/fc1',tf.nn.zero_fraction(local3))

    #fully_connected2
    with tf.name_scope('fully_connected2') as scope:
        weights =tf.Variable(tf.truncated_normal([384,192],dtype=tf.float32,
                                                stddev=1e-1),name='weights')
        print_activations(weights)
        biases=tf.Variable(tf.constant(0.0,shape=[192],dtype=tf.float32),
                                        trainable=True,name='biases')
        local4=tf.nn.relu(tf.matmul(local3,weights)+biases,name=scope)
        print_activations(local4)
    tf.summary.histogram('Fully_connected_layers/fc2',local4)
    tf.summary.scalar('Fully_connected_layers/fc2',tf.nn.zero_fraction(local4))

    #output
    with tf.name_scope('output') as scope:
        weights =tf.Variable(tf.truncated_normal([192,_NUM_CLASSES],dtype=tf.float32,
                                                stddev=1e-1),name='weights')
        print_activations(weights)
        biases=tf.Variable(tf.constant(0.0,shape=[_NUM_CLASSES],dtype=tf.float32),
                                        trainable=True,name='biases')
        softmax_linear=tf.add(tf.matmul(local4,weights),biases,name=scope)
    tf.summary.histogram('Fully_connected_layers/output',softmax_linear)

    global_step=tf.Variable(initial_value=0,name='global_step',trainable=False)
    y_pred_cls=tf.argmax(softmax_linear,axis=1)


    return x,y,softmax_linear,global_step,y_pred_cls
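
A quick sanity check (my own sketch, not part of the original repo): build the graph and push one random batch through it to confirm the logits come out as [batch, 10].

#coding=utf-8
#smoke test for alexnet.py: feed random data, check the output shape
import numpy as np
import tensorflow as tf
from alexnet import model

x,y,output,global_step,y_pred_cls=model()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    fake=np.random.rand(4,32*32*3).astype(np.float32)
    print(sess.run(output,feed_dict={x:fake}).shape)   #expected: (4, 10)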

train.py

#coding=utf-8
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from time import time

from alexnet import model  
from data import get_data_set

train_x,train_y,train_l=get_data_set("train")   #train_x: images, train_y: one-hot labels, train_l: class names
test_x,test_y,test_l=get_data_set("test")

x,y,output,global_step,y_pred_cls=model()   #x: input placeholder, y: label placeholder, output: logits (pre-softmax), y_pred_cls: predicted class indices
_IMG_SIZE = 32
_NUM_CHANNELS = 3
_BATCH_SIZE = 128
_CLASS_SIZE = 10
_ITERATION = 30000
_SAVE_PATH = "tensorboard/cifar-10/"  #create these directories beforehand
_SAVE_BOARD_PATH="tensorboard/board/"

loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output,labels=y))
#softmax_cross_entropy_with_logits takes the raw logits of the last layer and the one-hot ground-truth labels
optimizer=tf.train.RMSPropOptimizer(learning_rate=1e-3).minimize(loss,global_step=global_step)
#RMSProp optimizer; see http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf

correct_prediction=tf.equal(y_pred_cls,tf.argmax(y,axis=1))   #bool tensor: does the predicted class match the true class
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))   #tf.cast turns the bools into float32 so the mean is the accuracy

tf.summary.scalar('loss',loss)
#logs a scalar for TensorBoard; signature: tf.summary.scalar(name, tensor, collections=None)
tf.summary.scalar("Accuracy/train",accuracy)
tf.summary.histogram('histogram',accuracy)
#logs a histogram; signature: tf.summary.histogram(name, values, collections=None)
saver=tf.train.Saver()
sess=tf.Session()
merged=tf.summary.merge_all()
#merge_all bundles every summary op so a single sess.run writes them all for TensorBoard
train_writer=tf.summary.FileWriter(_SAVE_BOARD_PATH,sess.graph)   #writes the graph (and later the summaries) to this directory
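#to view the logs: run `tensorboard --logdir tensorboard/board/` and open the URL it prints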
sess.run(tf.global_variables_initializer())

#sess_path=saver.save(sess,_SAVE_PATH)
#try:
#    print("Trying to restore last checkpoint.....")
#    last_chk_path=tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)   #checkpoints live under this path
#    saver.restore(sess,save_path=last_chk_path)
#    print("Restored checkpoint from:",last_chk_path)
#except:
#    print("Failed to restore checkpoint. Initializing variables instead")
#    sess.run(tf.global_variables_initializer())

def train(num_iterations):
    for i in range(num_iterations):
        randidx=np.random.randint(len(train_x),size=_BATCH_SIZE)    #128 indices drawn uniformly from [0, len(train_x))
        batch_xs=train_x[randidx]
        batch_ys=train_y[randidx]

        start_time=time()
        i_global,_=sess.run([global_step,optimizer],feed_dict={x:batch_xs,y:batch_ys})
        duration=time()-start_time


        if (i_global%10==0) or (i==num_iterations-1):   #every 10 steps, and on the final iteration
            _loss,batch_acc=sess.run([loss,accuracy],feed_dict={x:batch_xs,y:batch_ys})
            msg= "Global Step: {0:>6}, accuracy: {1:>6.1%}, loss = {2:.2f} ({3:.1f} examples/sec, {4:.2f} sec/batch)"
            print(msg.format(i_global, batch_acc, _loss, _BATCH_SIZE / duration, duration))

            resultmerged=sess.run(merged,feed_dict={x:batch_xs,y:batch_ys})
            train_writer.add_summary(resultmerged,i_global)   #write the merged summaries for TensorBoard


        if (i_global%100==0) or (i==num_iterations-1):   #every 100 steps, and on the final iteration
            
            acc=predict_test()

            print('test accuracy is:',acc)
            saver.save(sess,save_path=_SAVE_PATH,global_step=global_step)
            print("Saved checkpoint")


def predict_test(show_confusion_matrix=False):

    i=0
    predicted_class=np.zeros(shape=len(test_x),dtype=np.int32)   #new array of zeros, one prediction slot per test image
    print('length of test_x:',len(test_x))

    while i<len(test_x):
        j=min(i+_BATCH_SIZE,len(test_x))
        batch_xs=test_x[i:j,:]
        #batch_xs is 128*3072; the last batch is 16*3072 (10000 = 78*128 + 16)
        batch_ys=test_y[i:j,:]
        predicted_class[i:j]=sess.run(y_pred_cls,feed_dict={x:batch_xs,y:batch_ys})
        i=j

    correct=(np.argmax(test_y,axis=1)==predicted_class)   #argmax over each row of test_y gives the true class index
    acc=correct.mean()*100

    correct_numbers=correct.sum()

    print("Accuracy on Test-Set:{0:.2f}%({1}/{2})".format(acc,correct_numbers,len(test_x)))

    if show_confusion_matrix:
        cm=confusion_matrix(y_true=np.argmax(test_y,axis=1),y_pred=predicted_class)   #confusion matrix
        for i in range(_CLASS_SIZE):
            class_name="({}){}".format(i,test_l[i])
            print(cm[i,:],class_name)   #row i: how images of true class i were classified (cm[i:] in the original printed all rows from i onward)
        class_numbers=["({0})".format(i) for i in range(_CLASS_SIZE)]
        print("".join(class_numbers))
    
    return acc

if _ITERATION!=0:
    train(_ITERATION)

sess.close()
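
To evaluate a saved checkpoint later without retraining, something like the sketch below works (my addition, assuming the alexnet.py and data.py files above and the _SAVE_PATH directory that saver.save wrote to):

#coding=utf-8
#standalone evaluation sketch: restore the latest checkpoint, report test accuracy
import numpy as np
import tensorflow as tf
from alexnet import model
from data import get_data_set

test_x,test_y,test_l=get_data_set("test")
x,y,output,global_step,y_pred_cls=model()
saver=tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess,tf.train.latest_checkpoint("tensorboard/cifar-10/"))
    pred=np.zeros(len(test_x),dtype=np.int32)
    i=0
    while i<len(test_x):
        j=min(i+128,len(test_x))
        pred[i:j]=sess.run(y_pred_cls,feed_dict={x:test_x[i:j]})
        i=j
    acc=(pred==np.argmax(test_y,axis=1)).mean()
    print("Test accuracy: {0:.2%}".format(acc))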


I won't annotate the remaining two files.
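
For completeness, here is a minimal sketch of what data.py's get_data_set might look like; this is my guess, not the repo's actual code, and it assumes the CIFAR-10 python pickle batches are unpacked under ./cifar-10-batches-py/:

#coding=utf-8
#hypothetical data.py sketch: load CIFAR-10 pickle batches into the shapes the
#training script expects (x: N x 3072 float, y: one-hot N x 10, l: class names)
import pickle
import numpy as np

def get_data_set(name="train",path="cifar-10-batches-py"):
    files=["data_batch_%d"%i for i in range(1,6)] if name=="train" else ["test_batch"]
    xs,ys=[],[]
    for f in files:
        with open("%s/%s"%(path,f),"rb") as fo:
            batch=pickle.load(fo,encoding="bytes")
        data=batch[b"data"].reshape(-1,3,32,32)          #CIFAR stores CHW colour planes
        data=data.transpose(0,2,3,1).reshape(-1,3072)    #to HWC so model()'s [-1,32,32,3] reshape sees channels last
        xs.append(data.astype(np.float32)/255.0)
        ys.append(np.array(batch[b"labels"]))
    x=np.concatenate(xs)
    y=np.eye(10,dtype=np.float32)[np.concatenate(ys)]    #one-hot labels
    with open("%s/batches.meta"%path,"rb") as fo:
        meta=pickle.load(fo,encoding="bytes")
    l=[s.decode("utf-8") for s in meta[b"label_names"]]
    return x,y,l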
