Deep Learning: An AlexNet Convolutional Neural Network in Python with TensorFlow (Study Notes)

These notes build a reduced, AlexNet-style CNN for MNIST with the TensorFlow 1.x graph API: three convolution + max-pooling + LRN + dropout blocks (using 3x3 kernels rather than the original AlexNet's five convolution layers with 11x11 and 5x5 kernels), followed by two fully connected layers and a softmax output.

from __future__ import print_function
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

# Load the MNIST handwritten-digit dataset (downloaded on first run)
mnist=input_data.read_data_sets('MNIST_data/',one_hot=True)

# Network hyperparameters
learning_rate=0.001
training_iters=200000
batch_size=64
display_step=20

# Network parameters
n_input=784 # input dimensionality (28x28 pixels, flattened)
n_classes=10 # number of classes (digits 0-9)
dropout=0.8 # dropout keep probability

# Placeholder inputs
x=tf.placeholder(tf.float32,[None,n_input])
y=tf.placeholder(tf.float32,[None,n_classes])
keep_prob=tf.placeholder(tf.float32)

# Convolution operation (conv + bias + ReLU)
def conv2d(name,l_input,w,b):
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input,w,strides=[1,1,1,1],padding='SAME'),b),name=name)

# Max-pooling (downsampling) operation
def max_pool(name,l_input,k):
    return tf.nn.max_pool(l_input,ksize=[1,k,k,1],strides=[1,k,k,1],padding='SAME',name=name)

# Local response normalization (LRN)
def norm(name,l_input,lsize=4):
    return tf.nn.lrn(l_input,lsize,bias=1.0,alpha=0.001/9.0,beta=0.75,name=name)
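
For reference, tf.nn.lrn is AlexNet's local response normalization: each activation is divided by (bias + alpha * sum of squared activations over lsize neighboring channels on each side) ** beta. As a quick sanity check of the three helpers (a hypothetical snippet, not part of the original notes), stride-1 SAME convolution preserves the spatial size, k=2 SAME pooling halves it, and LRN leaves the shape unchanged:

# Hypothetical shape check for the helpers above
check_in = tf.placeholder(tf.float32, [None, 28, 28, 1])
check_w = tf.Variable(tf.random_normal([3, 3, 1, 64]))
check_b = tf.Variable(tf.random_normal([64]))
c = conv2d('check_conv', check_in, check_w, check_b)  # (?, 28, 28, 64)
p = max_pool('check_pool', c, k=2)                    # (?, 14, 14, 64)
n = norm('check_norm', p, lsize=4)                    # LRN keeps (?, 14, 14, 64)
print(c.get_shape(), p.get_shape(), n.get_shape())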
   
# Define the full network
def alex_net(_X,_weights,_biases,_dropout):
    # Reshape the flat input vector into a 28x28x1 image
    _X=tf.reshape(_X,shape=[-1,28,28,1])
    
    # Convolution layer
    conv1=conv2d('conv1',_X,_weights['wc1'],_biases['bc1'])
    # Max-pooling (downsampling) layer
    pool1=max_pool('pool1',conv1,k=2)
    # Local response normalization layer
    norm1=norm('norm1',pool1,lsize=4)
    # Dropout
    norm1=tf.nn.dropout(norm1,_dropout)
    
    # Convolution layer
    conv2=conv2d('conv2',norm1,_weights['wc2'],_biases['bc2'])
    # Max-pooling (downsampling) layer
    pool2=max_pool('pool2',conv2,k=2)
    # Local response normalization layer
    norm2=norm('norm2',pool2,lsize=4)
    # Dropout
    norm2=tf.nn.dropout(norm2,_dropout)
    
    # Convolution layer
    conv3=conv2d('conv3',norm2,_weights['wc3'],_biases['bc3'])
    # Max-pooling (downsampling) layer
    pool3=max_pool('pool3',conv3,k=2)
    # Local response normalization layer
    norm3=norm('norm3',pool3,lsize=4)
    # Dropout
    norm3=tf.nn.dropout(norm3,_dropout)
    
    # Fully connected layer: first flatten the feature maps into a vector
    dense1=tf.reshape(norm3,[-1,_weights['wd1'].get_shape().as_list()[0]])
    dense1=tf.nn.relu(tf.matmul(dense1,_weights['wd1'])+_biases['bd1'],name='fc1')
    # Fully connected layer
    dense2=tf.nn.relu(tf.matmul(dense1,_weights['wd2'])+_biases['bd2'],name='fc2')
    
    # Network output layer (logits)
    out=tf.matmul(dense2,_weights['out'])+_biases['out']
    return out
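
A note on the flatten size used below: 'wd1' takes 4*4*256 inputs because each of the three k=2 SAME max-pools halves the spatial dimensions, rounding up, so 28 -> 14 -> 7 -> 4, while conv3 outputs 256 channels. A small arithmetic check (illustrative only):

# Spatial size after three stride-2 SAME max-pools: ceil at each step
import math
size = 28
for _ in range(3):
    size = int(math.ceil(size / 2.0))  # SAME pooling: output = ceil(input / stride)
print(size, size * size * 256)         # 4 4096 -- the first dimension of wd1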

# Store all network parameters
weights = {
    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),
    'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),
    'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),
    'wd1': tf.Variable(tf.random_normal([4*4*256, 1024])),
    'wd2': tf.Variable(tf.random_normal([1024, 1024])),
    'out': tf.Variable(tf.random_normal([1024, 10]))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([64])),
    'bc2': tf.Variable(tf.random_normal([128])),
    'bc3': tf.Variable(tf.random_normal([256])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'bd2': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
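
One caveat: tf.random_normal draws weights with the default stddev=1.0, which produces very large initial activations; that is why the early losses in the training log below are in the tens of thousands. A common remedy (my suggestion, not part of the original notes) is a much smaller initial scale, for example:

# Hypothetical alternative initializer: truncated normal with a small
# standard deviation (stddev=0.01 is illustrative, not tuned)
def small_normal(shape, stddev=0.01):
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev))

# e.g. 'wc1': small_normal([3, 3, 1, 64])
#      'wd1': small_normal([4*4*256, 1024])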

# Build the model
pred=alex_net(x,weights,biases,keep_prob)

# Define the loss function and the training step
cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred,labels=y))
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate the network; tf.cast() converts the boolean predictions to floats
correct_pred=tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
accuracy=tf.reduce_mean(tf.cast(correct_pred,tf.float32))

# Initialize all variables
init=tf.global_variables_initializer()

# Launch training
with tf.Session() as sess:
    sess.run(init)
    step=1
    while step*batch_size<training_iters:
        # Fetch a batch of training data
        batch_xs,batch_ys=mnist.train.next_batch(batch_size)
        # Run one optimization step (train with the dropout keep probability)
        sess.run(optimizer,feed_dict={x:batch_xs,y:batch_ys,keep_prob:dropout})
        if step%display_step==0:
            # Compute accuracy on the current batch (no dropout at eval time)
            acc=sess.run(accuracy,feed_dict={x:batch_xs,y:batch_ys,keep_prob:1.})
            # Compute the loss on the current batch
            loss=sess.run(cost,feed_dict={x:batch_xs,y:batch_ys,keep_prob:1.})
            print ("Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy = " + "{:.5f}".format(acc))
        step+=1
    print("Optimization Finished!")
    # Compute accuracy on the first 256 test images
    print("Testing Accuracy:",sess.run(accuracy,feed_dict={x:mnist.test.images[:256],y:mnist.test.labels[:256],keep_prob:1.}))

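Sample output from a training run:
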
Iter 1280, Minibatch Loss= 125839.984375, Training Accuracy = 0.15625
Iter 2560, Minibatch Loss= 86830.515625, Training Accuracy = 0.28125
Iter 3840, Minibatch Loss= 42984.570312, Training Accuracy = 0.56250
Iter 5120, Minibatch Loss= 63393.296875, Training Accuracy = 0.54688
Iter 6400, Minibatch Loss= 37901.750000, Training Accuracy = 0.57812
Iter 7680, Minibatch Loss= 35346.679688, Training Accuracy = 0.62500
Iter 8960, Minibatch Loss= 36194.000000, Training Accuracy = 0.56250
Iter 10240, Minibatch Loss= 43173.476562, Training Accuracy = 0.57812
Iter 11520, Minibatch Loss= 34044.027344, Training Accuracy = 0.64062
Iter 12800, Minibatch Loss= 44820.593750, Training Accuracy = 0.43750
Iter 14080, Minibatch Loss= 36636.125000, Training Accuracy = 0.60938
……
……
……
Iter 189440, Minibatch Loss= 2648.641113, Training Accuracy = 0.92188
Iter 190720, Minibatch Loss= 2197.439941, Training Accuracy = 0.92188
Iter 192000, Minibatch Loss= 1557.574097, Training Accuracy = 0.87500
Iter 193280, Minibatch Loss= 348.010498, Training Accuracy = 0.95312
Iter 194560, Minibatch Loss= 1478.040894, Training Accuracy = 0.93750
Iter 195840, Minibatch Loss= 1610.684814, Training Accuracy = 0.93750
Iter 197120, Minibatch Loss= 3200.984619, Training Accuracy = 0.93750
Iter 198400, Minibatch Loss= 454.932739, Training Accuracy = 0.98438
Iter 199680, Minibatch Loss= 2239.986328, Training Accuracy = 0.89062
Optimization Finished!
Testing Accuracy: 0.96875
