Hand-Coding a VGG16 Network

There are plenty of articles online analyzing the principles of the VGG16 network. I recommend working through the theory before reading the code; it makes the code much easier to follow.

Below is the source code for a small project of mine, built on the VGG16 model (with some modifications). It is shared for learning and discussion only; I don't recommend using it as-is, since your data processing will differ.

I'm not very comfortable with the Markdown editor, so apologies if any of the code alignment looks off.

I realized I haven't shown how to save the data in TFRecords format; I'll write that up at http://blog.csdn.net/dreamer_kitty/article/details/78437689, which is my next post.
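In the meantime, here is a minimal sketch of how a file like radio_train_VGG.tfrecords could be produced, assuming each sample is a 64x192x3 uint8 NumPy array with an integer class label (the helper name write_tfrecords is mine, not from the original project):

import tensorflow as tf

def write_tfrecords(images, labels, filename="radio_train_VGG.tfrecords"):
    #images: iterable of 64x192x3 uint8 arrays; labels: iterable of ints in [0, n_class)
    writer = tf.python_io.TFRecordWriter(filename)
    for img, lab in zip(images, labels):
        example = tf.train.Example(features=tf.train.Features(feature={
            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(lab)])),
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tobytes()]))
        }))
        writer.write(example.SerializeToString())
    writer.close()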

#coding:utf-8

import tensorflow as tf
import numpy as np


##read the TFRecords of the radio data
#the data were saved in TFRecords format

def read_and_decode(filename): # read radio_train_VGG.tfrecords
    filename_queue = tf.train.string_input_producer([filename]) #build a filename queue


    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue) #returns the filename and the serialized record
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'img_raw' : tf.FixedLenFeature([], tf.string),
                                       }) #pull the image data and the label out


    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [64, 192, 3])  #reshape into a 64x192 3-channel image
    img = tf.cast(img, tf.float32) * (1. / 255)  #emit the img tensor, scaled to [0, 1] (one could also subtract 0.5 to center it)
    label = tf.cast(features['label'], tf.int32) #emit the label tensor
    return img, label

#hyperparameters
n_input=12288   #64*192; not used below (a flattened 3-channel image would be 64*192*3)
n_class=10
learning_rate=0.001
batch_size=200


x=tf.placeholder(tf.float32,[None,64,192,3])
y=tf.placeholder(tf.float32,[None,n_class])
keep_prob=tf.placeholder(tf.float32) #dropout

#define the conv, max-pooling, and normalization layers as functions, for easy reuse below
def conv2d(name, x, w, b, strides=1):
    x = tf.nn.conv2d(x, w, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x, name=name)


def maxpool2d(name, x, k=2):
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)


def norm(name, inp, size=4):
    return tf.nn.lrn(inp, size, bias=1.0, alpha=0.001/9.0, beta=0.75, name=name)
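As a quick sanity check on these helpers: a 'SAME'-padded, stride-1 convolution preserves the spatial size, while maxpool2d halves it. A small sketch (the demo_* names are mine):

demo = tf.random_normal([1, 64, 192, 3])
w_demo = tf.Variable(tf.random_normal([3, 3, 3, 16]))
b_demo = tf.Variable(tf.random_normal([16]))
print(conv2d('demo_conv', demo, w_demo, b_demo).shape)                          # (1, 64, 192, 16)
print(maxpool2d('demo_pool', conv2d('demo_conv2', demo, w_demo, b_demo)).shape) # (1, 32, 96, 16)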

#build one-hot label vectors from the integer labels

def made_label(batch_ys):
    batch_y = np.zeros((batch_size, n_class))
    i = 0
    for j in batch_ys:
        batch_y[i][j] = 1
        i = i + 1
    return batch_y
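For what it's worth, the same one-hot encoding can be written as a single NumPy indexing expression; this is just an equivalent alternative to the loop above:

batch_y = np.eye(n_class)[batch_ys]   #row j of the identity matrix is the one-hot vector for label j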

#the network's W and B parameter matrices; adjust these to fit your own data

weight={
    'wc1_1':tf.Variable(tf.random_normal([3,3,3,16])),
    'wc1_2':tf.Variable(tf.random_normal([3,3,16,16])),
    'wc2_1':tf.Variable(tf.random_normal([3,3,16,32])),
    'wc2_2':tf.Variable(tf.random_normal([3,3,32,32])),
    'wc3_1':tf.Variable(tf.random_normal([3,3,32,64])),
    'wc3_2':tf.Variable(tf.random_normal([3,3,64,64])),
    'wc3_3':tf.Variable(tf.random_normal([3,3,64,64])),
    'wc3_4':tf.Variable(tf.random_normal([3,3,64,64])),
    'wc4_1':tf.Variable(tf.random_normal([3,3,64,128])),
    'wc4_2':tf.Variable(tf.random_normal([3,3,128,128])),
    'wc4_3':tf.Variable(tf.random_normal([3,3,128,128])),
    'wc4_4':tf.Variable(tf.random_normal([3,3,128,128])),
    'wc5_1':tf.Variable(tf.random_normal([3,3,128,256])),
    'wc5_2':tf.Variable(tf.random_normal([3,3,256,256])),
    'wc5_3':tf.Variable(tf.random_normal([3,3,256,256])),
    'wc5_4':tf.Variable(tf.random_normal([3,3,256,256])),
    'fc1':tf.Variable(tf.random_normal([2*6*256,4096])),
    'fc2':tf.Variable(tf.random_normal([4096,1024])),
    'fc3':tf.Variable(tf.random_normal([1024,10]))
}
bias={
    'bc1_1':tf.Variable(tf.random_normal([16])),
    'bc1_2':tf.Variable(tf.random_normal([16])),
    'bc2_1':tf.Variable(tf.random_normal([32])),
    'bc2_2':tf.Variable(tf.random_normal([32])),
    'bc3_1':tf.Variable(tf.random_normal([64])),
    'bc3_2':tf.Variable(tf.random_normal([64])),
    'bc3_3':tf.Variable(tf.random_normal([64])),
    'bc3_4':tf.Variable(tf.random_normal([64])),
    'bc4_1':tf.Variable(tf.random_normal([128])),
    'bc4_2':tf.Variable(tf.random_normal([128])),
    'bc4_3':tf.Variable(tf.random_normal([128])),
    'bc4_4':tf.Variable(tf.random_normal([128])),
    'bc5_1':tf.Variable(tf.random_normal([256])),
    'bc5_2':tf.Variable(tf.random_normal([256])),
    'bc5_3':tf.Variable(tf.random_normal([256])),
    'bc5_4':tf.Variable(tf.random_normal([256])),
    'fc1':tf.Variable(tf.random_normal([4096])),
    'fc2':tf.Variable(tf.random_normal([1024])),
    'fc3':tf.Variable(tf.random_normal([10]))
}
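A quick check of the fc1 input size: the 64x192 input goes through five 2x2 max-pooling stages, each halving both spatial dimensions, leaving a 2x6 feature map with 256 channels, hence the 2*6*256 above:

print((64 // 2**5) * (192 // 2**5) * 256)   #2 * 6 * 256 = 3072 flattened features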

#now define the VGG16 network

def VGG(x, weight, bias, keep_prob):
    inp = tf.reshape(x, [-1, 64, 192, 3])
    #conv1
    conv1_1 = conv2d('conv1_1', inp, weight['wc1_1'], bias['bc1_1'])
    conv1_2 = conv2d('conv1_2', conv1_1, weight['wc1_2'], bias['bc1_2'])
    pool1 = maxpool2d('pool1', conv1_2)

    #conv2
    conv2_1 = conv2d('conv2_1', pool1, weight['wc2_1'], bias['bc2_1'])
    conv2_2 = conv2d('conv2_2', conv2_1, weight['wc2_2'], bias['bc2_2'])
    pool2 = maxpool2d('pool2', conv2_2)

    #conv3
    conv3_1 = conv2d('conv3_1', pool2, weight['wc3_1'], bias['bc3_1'])
    conv3_2 = conv2d('conv3_2', conv3_1, weight['wc3_2'], bias['bc3_2'])
    conv3_3 = conv2d('conv3_3', conv3_2, weight['wc3_3'], bias['bc3_3'])

    #conv3_4 = conv2d('conv3_4', conv3_3, weight['wc3_4'], bias['bc3_4'])

    #this was adapted from VGG19, so the extra fourth conv layers are simply commented out

    pool3 = maxpool2d('pool3', conv3_3)
    #conv4
    conv4_1 = conv2d('conv4_1', pool3, weight['wc4_1'], bias['bc4_1'])
    conv4_2 = conv2d('conv4_2', conv4_1, weight['wc4_2'], bias['bc4_2'])
    conv4_3 = conv2d('conv4_3', conv4_2, weight['wc4_3'], bias['bc4_3'])
    #conv4_4 = conv2d('conv4_4', conv4_3, weight['wc4_4'], bias['bc4_4'])
    pool4 = maxpool2d('pool4', conv4_3)
    #conv5
    conv5_1 = conv2d('conv5_1', pool4, weight['wc5_1'], bias['bc5_1'])
    conv5_2 = conv2d('conv5_2', conv5_1, weight['wc5_2'], bias['bc5_2'])
    conv5_3 = conv2d('conv5_3', conv5_2, weight['wc5_3'], bias['bc5_3'])
    #conv5_4 = conv2d('conv5_4', conv5_3, weight['wc5_4'], bias['bc5_4'])
    pool5 = maxpool2d('pool5', conv5_3)

    #flatten the pool5 output from a feature map into a 1-D vector

    xc = tf.reshape(pool5, [-1, weight['fc1'].get_shape().as_list()[0]])
    fc1 = tf.nn.relu(tf.add(tf.matmul(xc, weight['fc1']), bias['fc1']))
    #dropout
    fc1 = tf.nn.dropout(fc1, keep_prob)


    #fc2
    fc2 = tf.nn.relu(tf.add(tf.matmul(fc1, weight['fc2']), bias['fc2']))
    #dropout
    fc2 = tf.nn.dropout(fc2, keep_prob)


    #fc3

    #no softmax on the last layer, because the loss function below applies it; you could also wrap this in tf.nn.softmax() here

    fc3 = tf.add(tf.matmul(fc2, weight['fc3']), bias['fc3'])


    #out = fc3
    return fc3


pred=VGG(x,weight,bias,keep_prob)

#define the cost function and optimizer

#the loss is softmax cross-entropy, minimized with AdamOptimizer

cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=pred))
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

#define the evaluation function
correct_pred=tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
accuracy=tf.reduce_mean(tf.cast(correct_pred,tf.float32))
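Note that pred holds raw logits. For prediction on new data you would apply the softmax yourself; a one-line sketch (the name probs is mine):

probs = tf.nn.softmax(pred)   #per-class probabilities for inference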




#define the hyperparameters used during training

training_step=1      #number of training iterations
display_step=5
dropout=0.5
train_flag=True


#initialize all variables
init=tf.global_variables_initializer()

with tf.Session() as sess:

    #read the data

    img, label = read_and_decode("radio_train_VGG.tfrecords")
    img_batch, label_batch = tf.train.shuffle_batch([img, label],
                                                    batch_size, capacity=2000,
                                                    min_after_dequeue=1500)
    sess.run(init)
    threads = tf.train.start_queue_runners(sess=sess)


    #saver for the parameters of the VGG net
    saver = tf.train.Saver()
    saver.restore(sess, "model.ckpt")  #NOTE: assumes model.ckpt already exists; comment this out on a first run

    #use tensorboard to view the graph
    #merged = tf.summary.merge_all()
    #writer = tf.summary.FileWriter("/tmp/mnist", sess.graph_def)

    #train_flag is a convenience for my own debugging: if True, train; if False, run the visualization

    #if you don't need it, you can delete it


    if train_flag: #removable

        for step in range(training_step):

            batch_xs, batch_ys = sess.run([img_batch, label_batch])


            batch_x = batch_xs
            batch_y = made_label(batch_ys)

            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
                                           keep_prob: dropout})

            if step % display_step == 0:
                loss, acc = sess.run([cost, accuracy], feed_dict={
                                     x: batch_x, y: batch_y, keep_prob: 1.0})
                print("Step:", '%04d' % (step),
                      "cost=", "{:.5f}".format(loss),
                      "Acc=", "{:.9f}".format(acc))

        print("finish the training!")
        saver_path = saver.save(sess, "model.ckpt")

    else: #removable

        pass  #...my visualization code would go here; most readers won't need it, so I haven't included it
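One caveat about the input pipeline: start_queue_runners is called here without a tf.train.Coordinator, so the queue threads are never shut down cleanly when the script ends. A minimal sketch of the standard TF 1.x pattern (not part of the original code):

coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# ... training loop ...
coord.request_stop()   #ask the queue threads to stop
coord.join(threads)    #wait for them to finish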



PS: Please credit the source if you repost this, and feel free to message me to discuss. :)
