Deep Learning Study Notes (2)

Convolutional Neural Networks

Next up is the CNN (convolutional neural network).
As before, the first piece of code is meant to expose the low-level logic of convolution: we implement the convolution process by hand, without using the API.
Here is the code:

# Think through the convolution process while writing this code; just staring at the code makes it easy to lose the thread
import tensorflow as tf

inputs = tf.ones([3,6,6,3])      # NHWC: batch of 3, 6x6 images, 3 channels
features = tf.ones([3,3,3,2])    # HWIO: 3x3 kernel, 3 in-channels, 2 out-channels

batch_size = int(inputs.shape[0])
in_channel = int(features.shape[2])
out_channel = int(features.shape[3])

width = int(features.shape[0])
height = int(features.shape[1])

# with stride 1 and no padding, output size = input size - kernel size + 1
out_width = int(inputs.shape[1])-width+1
out_height = int(inputs.shape[2])-height+1

batches = []
for b in range(batch_size):                  # loop over the images in the batch
    feature_maps = []
    for n in range(out_channel):             # one output feature map per out-channel
        feature_map = []
        for k in range(in_channel):          # convolve each input channel separately
            output = []
            for i in range(out_width):
                for j in range(out_height):
                    # multiply the current window by the kernel element-wise and sum
                    output.append(tf.reduce_sum(inputs[b,i:i+width,j:j+height,k]*features[:,:,k,n]))
            output_map = tf.stack(output)
            # stack piles all the elements up into a single tensor
            output_map = tf.reshape(output_map,[out_width,out_height])
            feature_map.append(output_map)
        t_feature_map = tf.stack(feature_map,axis=-1)
        t_feature_map = tf.reduce_sum(t_feature_map,axis=-1)  # reduce_sum collapses the per-channel results into one feature map
        feature_maps.append(t_feature_map)
    t_feature_maps = tf.stack(feature_maps,axis=-1)
    batches.append(t_feature_maps)
t_batches = tf.stack(batches)

sess = tf.Session()
out = sess.run(t_batches)
print(out)
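
As a quick sanity check, the loop's result can be compared against TensorFlow's built-in convolution: with stride 1 and 'VALID' padding, tf.nn.conv2d computes exactly the same thing. A minimal sketch, reusing the sess and out from above:

import numpy as np

# built-in convolution for comparison: stride 1, no padding
expected = sess.run(tf.nn.conv2d(inputs, features, strides=[1,1,1,1], padding='VALID'))
print(np.allclose(out, expected))  # should print True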

MNIST Classification (CNN)
Below is an MNIST classification project built with a convolutional neural network, with TensorBoard added for tuning. In a CNN, the key thing to watch is how the image's spatial size changes after being convolved by layers with various kernel sizes.
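
Before reading the code, it is worth being able to compute those sizes by hand. Below is a small sketch (out_size is just an illustrative helper, not part of the project) of TensorFlow's sizing rules: 'SAME' gives ceil(n/stride) and 'VALID' gives ceil((n-k+1)/stride). Tracing the all-'SAME' network below yields 28 → 28 → 14 → 14 → 7, so flatten sees 7*7*16 = 784; in the commented-out variant, whose conv2 uses 'VALID', conv2 instead yields 10 and pool2 yields 5, which is why its fc1 weights have shape [5*5*16, 128].

import math

def out_size(n, k, s, padding):
    """Spatial output size of one dimension under TF's padding rules."""
    if padding == 'SAME':
        return math.ceil(n / s)
    return math.ceil((n - k + 1) / s)   # 'VALID'

n = out_size(28, 5, 1, 'SAME')   # conv1: 28
n = out_size(n, 2, 2, 'SAME')    # pool1: 14
n = out_size(n, 5, 1, 'SAME')    # conv2: 14 (a 'VALID' conv here would give 10)
n = out_size(n, 2, 2, 'SAME')    # pool2: 7  (the 'VALID' variant gives 5)
print(n * n * 16)                # flatten size: 784 (5*5*16 = 400 in the 'VALID' variant)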

# -*- coding: utf-8 -*-
"""
Created on Wed Dec  2 19:21:11 2020

@author: 14419
"""


import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('./datasets',one_hot=True,reshape=False)

# three hyperparameters
learning_rate = 0.01
batch_size = 128
epochs = 10
# TensorBoard step 1: create a FileWriter pointing at the log directory
writer = tf.summary.FileWriter('logits')

tf.reset_default_graph()

inputs = tf.placeholder(tf.float32,[None,28,28,1])
labels = tf.placeholder(tf.float32,[None,10])

## TensorBoard step 2: tag the variables you want to track
##weight_conv1 = tf.Variable(tf.random_normal([5,5,1,6],stddev=0.1))
##tf.summary.histogram('wc1',weight_conv1)
## use histogram for multi-dimensional tensors
##weight_conv2 = tf.Variable(tf.random_normal([5,5,6,16],stddev=0.1))
##tf.summary.histogram('wc2',weight_conv2)
##weight_flc1 = tf.Variable(tf.random_normal([5*5*16,128],stddev=0.1))
##weight_flc2 = tf.Variable(tf.random_normal([128,84],stddev=0.1))
##weight_output = tf.Variable(tf.random_normal([84,10],stddev=0.1))
## the individual weight variables above can be written as:
#weights = {'wc1':tf.Variable(tf.random_normal([5,5,1,6],stddev=0.1)),
#          'wc2':tf.Variable(tf.random_normal([5,5,6,16],stddev=0.1)),
#          'fc1':tf.Variable(tf.random_normal([5*5*16,128],stddev=0.1)),
#          'fc2':tf.Variable(tf.random_normal([128,84],stddev=0.1)),
#          'out':tf.Variable(tf.random_normal([84,10],stddev=0.1))}
#
#biases = {'bc1':tf.Variable(tf.zeros([6])),
#          'bc2':tf.Variable(tf.zeros([16])),
#          'bf1':tf.Variable(tf.zeros([128])),
#          'bf2':tf.Variable(tf.zeros([84])),
#          'bout':tf.Variable(tf.zeros([10]))
#              }

#conv1 = tf.nn.conv2d(inputs,weights['wc1'],strides=[1,1,1,1],padding='SAME')
#conv1 = tf.nn.relu(conv1+biases['bc1'])
##tf.summary.histogram('conv1',conv1)
#pool1 = tf.nn.max_pool(conv1,[1,2,2,1],[1,2,2,1],padding='SAME')
#conv2 = tf.nn.conv2d(pool1,weights['wc2'],strides=[1,1,1,1],padding='VALID')
#conv2 = tf.nn.relu(conv2+biases['bc2'])
##tf.summary.histogram('conv2',conv2)
#pool2 = tf.nn.max_pool(conv2,[1,2,2,1],[1,2,2,1],padding='SAME')

#flatten1 = tf.reshape(pool2,[-1,pool2.shape[1]*pool2.shape[2]*pool2.shape[3]])
#fc1 = tf.matmul(flatten1,weights['fc1'])+biases['bf1']
#fc2 = tf.matmul(fc1,weights['fc2'])+biases['bf2']
#logits = tf.matmul(fc2,weights['out'])+biases['bout']
# the commented-out layer-by-layer graph above can be written with tf.layers as:
conv1 = tf.layers.conv2d(inputs,6,(5,5),padding='SAME',activation=tf.nn.relu,\
                         kernel_initializer=tf.random_normal_initializer(stddev=0.1))
pool1 = tf.layers.max_pooling2d(conv1,(2,2),(2,2),padding='SAME')
conv2 = tf.layers.conv2d(pool1,16,(5,5),padding='SAME',activation=tf.nn.relu,\
                         kernel_initializer=tf.random_normal_initializer(stddev=0.1))
pool2 = tf.layers.max_pooling2d(conv2,(2,2),(2,2),padding='SAME')
flatten1 = tf.layers.flatten(pool2)
fc1 = tf.layers.dense(flatten1,128,activation=tf.nn.relu)
fc2 = tf.layers.dense(fc1,84,activation=tf.nn.relu)
logits = tf.layers.dense(fc2,10)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,\
                                                              labels=labels))
tf.summary.scalar('cost',cost)
# cost is a scalar, so use tf.summary.scalar
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(logits,1),tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
# cast converts the booleans to the given dtype

merge = tf.summary.merge_all()
# merge all the tagged summaries into a single op
init = tf.global_variables_initializer()

config = tf.ConfigProto()
config.gpu_options.allow_growth=True
with tf.Session(config=config) as sess:
    sess.run(init)
    # TensorBoard step 3: add the graph inside the session
    writer.add_graph(sess.graph)
    batch_num = mnist.train.num_examples//batch_size + 1
    for e in range(epochs):
        for i in range(batch_num):
            batches = mnist.train.next_batch(batch_size)
            # TensorBoard step 4: save the summaries from every step
            _,summary=sess.run([optimizer,merge],feed_dict={inputs:batches[0],\
                               labels:batches[1]})
            writer.add_summary(summary,e*batch_num+i)
        acc = sess.run(accuracy,feed_dict={inputs:mnist.validation.images,\
                                           labels:mnist.validation.labels})
        print('Epochs:{:<2} acc:{:.3f}'.format(e+1,acc))
    test_acc = sess.run(accuracy,feed_dict={inputs:mnist.test.images,\
                                            labels:mnist.test.labels})
    print('test acc:',test_acc)
    
# if the weights grow very large, the model is probably overfitting; very large outputs suggest something is wrong with the network
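
To inspect the logged graph, weight histograms, and cost curve, run tensorboard --logdir=logits from the working directory and open the address it prints (http://localhost:6006 by default).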
            
            


Since convolution shows up constantly in the domains that follow, I won't walk through any more CNN code examples here; let's jump straight to the next part, which is also the one I keep mixing up: RNNs.
