The TensorFlow Learning Path: VGG

# -*- coding: utf-8 -*-
"""
Created on Thu Mar 22 16:11:02 2018

@author: kxq
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
## MNIST input helper from the TF1.x tutorials module

batch_size=64
training_iters=200000
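## training_iters counts training examples seen, not optimizer steps: with
## batch_size=64 the training loop below runs for roughly 200000/64 ≈ 3125 steps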

mnist=input_data.read_data_sets('MNIST_data',one_hot=True)

def weight_variable(shape):
    init=tf.truncated_normal(shape,stddev=0.1)
    return tf.Variable(init)

def bias_variable(shape):
    init=tf.constant(0.1,shape=shape)
    return tf.Variable(init)

def conv_layer(X,W,strides):
    return tf.nn.conv2d(X,W,strides=strides,padding='SAME')

def pool_layer(conv,ksize,strides):
    return tf.nn.max_pool(conv,ksize=ksize,strides=strides,padding='SAME')
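
## With padding='SAME', a stride-1 convolution preserves the spatial size and
## a stride-2 max-pool halves it (rounding up): 28 -> 14 -> 7 -> 4 -> 2. The
## per-layer shape comments below follow from this rule.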

x=tf.placeholder(shape=[None,28*28],dtype=tf.float32)
y=tf.placeholder(shape=[None,10],dtype=tf.float32)
keep_prob = tf.placeholder(tf.float32)
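## keep_prob is the dropout retention rate: fed 0.8 during training and 1.0 at
## evaluation; the flat 784-pixel rows are reshaped into NHWC [batch,28,28,1]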
X_image=tf.reshape(x,[-1,28,28,1])
##conv1
weight_1=weight_variable([3,3,1,32])
bias_1=bias_variable([32])
conv_1=tf.nn.relu(tf.nn.bias_add(conv_layer(X_image,weight_1,[1,1,1,1]),bias_1)) ##28*28*32

##conv2
weight_2=weight_variable([3,3,32,32])
bias_2=bias_variable([32])
conv_2=tf.nn.relu(tf.nn.bias_add(conv_layer(conv_1,weight_2,[1,1,1,1]),bias_2)) ##28*28*32

##pool_1
pool_1=pool_layer(conv_2,[1,2,2,1],[1,2,2,1])                                   ##14*14*32

pool_1 = tf.nn.dropout(pool_1, keep_prob)

##conv_3
weight_3=weight_variable([3,3,32,64])
bias_3=bias_variable([64])
conv_3=tf.nn.relu(tf.nn.bias_add(conv_layer(pool_1,weight_3,[1,1,1,1]),bias_3)) ##14*14*64

##conv_4

weight_4=weight_variable([3,3,64,64])
bias_4=bias_variable([64])
conv_4=tf.nn.relu(tf.nn.bias_add(conv_layer(conv_3,weight_4,[1,1,1,1]),bias_4)) ##14*14*64

##pool_2
pool_2=pool_layer(conv_4,[1,2,2,1],[1,2,2,1])                                   ##7*7*64
pool_2 = tf.nn.dropout(pool_2, keep_prob)

##conv_5
weight_5=weight_variable([3,3,64,128])
bias_5=bias_variable([128])
conv_5=tf.nn.relu(tf.nn.bias_add(conv_layer(pool_2,weight_5,[1,1,1,1]),bias_5)) ##7*7*128

##conv_6
weight_6=weight_variable([3,3,128,128])
bias_6=bias_variable([128])
conv_6=tf.nn.relu(tf.nn.bias_add(conv_layer(conv_5,weight_6,[1,1,1,1]),bias_6)) ##7*7*128    

##conv_7
weight_7=weight_variable([3,3,128,128])
bias_7=bias_variable([128])
conv_7=tf.nn.relu(tf.nn.bias_add(conv_layer(conv_6,weight_7,[1,1,1,1]),bias_7)) ##7*7*128   

##pool_3
pool_3=pool_layer(conv_7,[1,2,2,1],[1,2,2,1])                                   ##4*4*128
pool_3 = tf.nn.dropout(pool_3, keep_prob)

##conv_8
weight_8=weight_variable([3,3,128,256])
bias_8=bias_variable([256])
conv_8=tf.nn.relu(tf.nn.bias_add(conv_layer(pool_3,weight_8,[1,1,1,1]),bias_8)) ##4*4*256

##conv_9
weight_9=weight_variable([3,3,256,256])
bias_9=bias_variable([256])
conv_9=tf.nn.relu(tf.nn.bias_add(conv_layer(conv_8,weight_9,[1,1,1,1]),bias_9)) ##4*4*256

##conv_10
weight_10=weight_variable([3,3,256,256])
bias_10=bias_variable([256])
conv_10=tf.nn.relu(tf.nn.bias_add(conv_layer(conv_9,weight_10,[1,1,1,1]),bias_10)) ##4*4*256

##pool_4
pool_4=pool_layer(conv_10,[1,2,2,1],[1,2,2,1])                                    ##2*2*256
pool_4 = tf.nn.dropout(pool_4, keep_prob)

##func_1 
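## flatten pool_4 (2*2*256 = 1024 features per image) before the dense layers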
weight_fc1=weight_variable([2*2*256,1024])
bias_fc1=bias_variable([1024])
pool_4_flat=tf.reshape(pool_4,[-1,2*2*256])
func_1=tf.nn.relu(tf.nn.bias_add(tf.matmul(pool_4_flat,weight_fc1),bias_fc1))   ##1*1024

##func_2
weight_fc2=weight_variable([1024,1024])
bias_fc2=bias_variable([1024])
func_2=tf.nn.relu(tf.nn.bias_add(tf.matmul(func_1,weight_fc2),bias_fc2))        ##1*1024

##func_3 ## network output (logits)
weight_fc3=weight_variable([1024,10])
bias_fc3=bias_variable([10])
pred=tf.nn.bias_add(tf.matmul(func_2,weight_fc3),bias_fc3)
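## pred holds raw logits; softmax is applied inside the loss op below, so no
## output activation is applied here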

loss= tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(0.001).minimize(loss)
## evaluation
correct=tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
accuracy=tf.reduce_mean(tf.cast(correct,tf.float32))
init=tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    step=1
    while batch_size*step<training_iters:
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        _,l=sess.run([optimizer,loss], feed_dict={x: batch_xs, y: batch_ys, keep_prob:0.8})
        if step%20==0:
            acc=sess.run(accuracy,feed_dict={x: batch_xs, y: batch_ys, keep_prob:1.})
            print("loss=",l,"accuracy=",acc)
        step += 1
    print ("Optimization Finished!")
    # compute test-set accuracy
    print ("Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.}))
Compared with the earlier convolutional network, this VGG-style model stacks several more convolutional and pooling layers, and the accuracy improves accordingly: the final test accuracy is 98.4375%.