TensorFlow Learning (6): Using a Convolutional Neural Network (CNN) in TensorFlow

I'm learning the basics of TensorFlow from a video course on Bilibili and taking notes along the way.
Since the first lesson only covers environment setup, which I had already done on my own, these notes start from part (2).

  • Video content: 06-1; 06-2
  • Main content: an introduction to convolutional neural networks (CNN), and using a CNN to solve the MNIST classification problem.
  • For the code, see the link on GitHub. All of the code was written and run in Jupyter Notebook, so there were many intermediate runs along the way to inspect the output.
  • The code is in .ipynb format. To view it: with Anaconda (or a similar environment) already installed, open a terminal, run jupyter notebook, and navigate to the file.

6-1: CNN basics

For background, see:
Blog 1: CNN之自我理解 (a personal take on CNNs)
Blog 2: 零基础入门深度学习 (deep learning from scratch)
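One piece of arithmetic worth keeping in mind while reading those posts: the spatial output size of a convolution or pooling operation is determined by the input size, kernel size, stride, and padding. A minimal sketch of TensorFlow's two padding rules (the helper below is hypothetical, just for illustration):

import math

# Sketch: output spatial size for TensorFlow's two padding modes (hypothetical helper)
def output_size(in_size, kernel, stride, padding):
    if padding == 'SAME':                                  # zero-padded, so size depends only on the stride
        return math.ceil(in_size / stride)
    return math.ceil((in_size - kernel + 1) / stride)      # 'VALID': no padding

print(output_size(28, 5, 1, 'SAME'))   # 28: a 5x5 conv with stride 1 keeps 28x28
print(output_size(28, 2, 2, 'SAME'))   # 14: a 2x2 pool with stride 2 halves it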

6-2: Using convolution in TensorFlow

  • The TensorFlow functions involved are tf.nn.conv2d and tf.nn.max_pool.
  • For the convolutional layer, wrap the convolution in a small helper (a shape check for it is sketched after this list):
def conv2d(x,W):
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')
    # x: input tensor of shape [batch, in_height, in_width, in_channels]; here batch=100
    # W: the filter/kernel, also a tensor, of shape [filter_height, filter_width, in_channels, out_channels]
    # strides: strides[0]=strides[3]=1; strides[1] is the stride along x, strides[2] the stride along y
    # padding: 'SAME' (zero-padded) or 'VALID' (no padding)
  • Likewise wrap the pooling operation in a helper:
# Pooling layer
def max_pool_2x2(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
    # 2*2 pooling window, moved 2 cells at a time both horizontally and vertically
  • Non-linear activation function: tf.nn.relu
  • When building a convolutional layer, first define the weights and biases, then apply the convolution.
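As a quick sanity check on the two helpers, the shapes they produce can be inspected directly on the graph (a minimal sketch assuming TensorFlow 1.x graph mode; x_demo and W_demo are illustrative names, not part of the final model):

# Sketch: shapes produced by conv2d and max_pool_2x2 (illustrative, TF 1.x graph mode assumed)
x_demo = tf.placeholder(tf.float32, [None, 28, 28, 1])
W_demo = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
h_demo = conv2d(x_demo, W_demo)
print(h_demo.shape)                 # (?, 28, 28, 32): SAME padding keeps the 28x28 spatial size
print(max_pool_2x2(h_demo).shape)   # (?, 14, 14, 32): the stride-2 pooling halves height and width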

The complete code:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size = 100
n_batch = mnist.train.num_examples // batch_size

# Initialize weights
def weight_variable(shape):
    return tf.Variable(tf.truncated_normal(shape,stddev=0.1))

# Initialize biases
def bias_vairable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

# Define the convolution operation as a thin wrapper around the library call
def conv2d(x,W):
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME') # we don't need to know how the convolution is computed internally, only the argument format
    # x: input tensor of shape [batch, in_height, in_width, in_channels]; here batch=100
    # W: the filter/kernel, also a tensor, of shape [filter_height, filter_width, in_channels, out_channels]
    # strides: strides[0]=strides[3]=1; strides[1] is the stride along x, strides[2] the stride along y
    # padding: 'SAME' (zero-padded) or 'VALID' (no padding)

# Pooling layer
def max_pool_2x2(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
    # 2*2 pooling window, moved 2 cells at a time both horizontally and vertically

x = tf.placeholder(tf.float32,[None,784]) # None: the batch dimension (number of rows)
y = tf.placeholder(tf.float32,[None,10])
keep_prob = tf.placeholder(tf.float32)

x_image = tf.reshape(x,[-1,28,28,1]) # reshape back to image form: -1 is the batch size, 28 height, 28 width, 1 channel (grayscale)

W_conv1 = weight_variable([5,5,1,32]) # 5*5 sampling window, 1 input channel, 32 output channels: 32 kernels extract features from 1 feature map
b_conv1 = bias_vairable([32]) # one bias per kernel

# convolving the 28*28*1 image gives 28*28*32
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# after pooling: 14*14*32
h_pool1 = max_pool_2x2(h_conv1)

# after the second convolution: 14*14*64
W_conv2 = weight_variable([5,5,32,64]) # the previous layer produced 32 channels, hence 32 here
b_conv2 = bias_vairable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2) + b_conv2)
# after the second pooling: 7*7*64
h_pool2 = max_pool_2x2(h_conv2)

# the 28*28 image stays 28*28 after the first convolution and becomes 14*14 after the first pooling
# after the second convolution it is 14*14, and after the second pooling 7*7
# the operations above therefore leave 64 feature maps of size 7*7

# First fully connected layer
W_fc1 = weight_variable([7*7*64,1024]) # 7*7*64 neurons in the previous layer, 1024 neurons in this layer
b_fc1 = bias_vairable([1024])

# flatten the 7*7*64 volume into a 1-D vector
h_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Second fully connected layer
W_fc2 = weight_variable([1024,10])
b_fc2 = bias_vairable([10])
logits = tf.matmul(h_fc1_drop,W_fc2) + b_fc2
prediction = tf.nn.softmax(logits)

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)

correct_prediction = tf.equal(tf.argmax(prediction,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x:batch_xs,y:batch_ys,keep_prob:0.7})
        acc = sess.run(accuracy, feed_dict={x:mnist.test.images, y:mnist.test.labels, keep_prob:1.0})
        print("Iter: " + str(epoch) + ", acc: " + str(acc))

The same model with TensorBoard summaries added:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size = 128
n_batch = mnist.train.num_examples // batch_size
max_step = 1000
keep_ = 0.8
log_dir = "Logs/log-6.1"

# Create a weight variable
def weight_variable(shape):
    return tf.Variable(tf.truncated_normal(shape,stddev=0.1),name='W')

# Create a bias variable
def bias_vairable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape),name='b')

# Attach summaries (mean, stddev, max, min, histogram) to a variable
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var-mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)

def conv2d(x,W):
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME',name='conv2d')
        
def conv_layer(input_tensor, weight_shape, layer_name, act=tf.nn.relu):
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            weights = weight_variable(weight_shape)
            variable_summaries(weights)
        with tf.name_scope('biases'):
            biases = bias_vairable([weight_shape[-1]])
            variable_summaries(biases)
        with tf.name_scope('conv_comput'):
            preactivate = conv2d(input_tensor,weights) + biases
        with tf.name_scope('activate'):
            activations = act(preactivate)
        return activations

def linear_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            weights = weight_variable([input_dim, output_dim])
            variable_summaries(weights)
        with tf.name_scope('biases'):
            biases = bias_vairable([output_dim])
            variable_summaries(biases)
        with tf.name_scope('linear_comput'):
            preactivate = tf.matmul(input_tensor,weights) + biases
        with tf.name_scope('activate'):
            activations = act(preactivate)
        return activations
        

def max_pool_2x2(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='Max_pool')

with tf.name_scope('Input'):
    x = tf.placeholder(tf.float32,[None,784],name='input_x')
    with tf.name_scope('Input_reshape'):
        x_image = tf.reshape(x,[-1,28,28,1],name='x-image')
        tf.summary.image('input',x_image,10)
    y = tf.placeholder(tf.float32,[None,10],name='input_y')
    keep_prob = tf.placeholder(tf.float32,name='keep_prob')

# First convolution: 28*28*1 -> 28*28*32
conv_layer1 = conv_layer(x_image,[5,5,1,32],'conv_layer1')
# after pooling: 14*14*32
with tf.name_scope('Max_pool1'):
    h_pool1 = max_pool_2x2(conv_layer1)

# Second convolution: 14*14*32 -> 14*14*64
conv_layer2 = conv_layer(h_pool1,[5,5,32,64],'conv_layer2')
# after the second pooling: 7*7*64
with tf.name_scope('Max_pool2'):
    h_pool2 = max_pool_2x2(conv_layer2)

with tf.name_scope('Flatten'):
    flatten_ = tf.reshape(h_pool2,[-1,7*7*64])
    
# First fully connected layer: 7*7*64 -> 1024
fc1 = linear_layer(flatten_, 7*7*64, 1024, 'FC1')

with tf.name_scope('Dropout'):
    fc1_drop = tf.nn.dropout(fc1, keep_prob)
    
# Second fully connected layer: 1024 -> 10; keep the raw logits (no activation here),
# since softmax_cross_entropy_with_logits below expects unscaled logits
logits = linear_layer(fc1_drop, 1024, 10, 'FC2',act=tf.identity)

with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    tf.summary.scalar('loss',loss)
with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(0.001).minimize(loss)

with tf.name_scope('accuracy'):
    prediction = tf.nn.softmax(logits)
    correct_prediction = tf.equal(tf.argmax(prediction,1), tf.argmax(y,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    tf.summary.scalar('accuracy', accuracy)
    
merged = tf.summary.merge_all()

def get_dict(train):
    if train:
        xs, ys = mnist.train.next_batch(batch_size)
        k = keep_
    else:
        xs, ys = mnist.test.images, mnist.test.labels
        k = 1.0
    return {x:xs, y:ys, keep_prob: k}

with tf.Session() as sess:
    train_writer = tf.summary.FileWriter(log_dir + '/train', sess.graph)
    test_writer = tf.summary.FileWriter(log_dir + '/test')
    
    sess.run(tf.global_variables_initializer())
    
    for i in range(max_step):
        if i%10 == 0:
            summary,acc = sess.run([merged,accuracy], feed_dict=get_dict(False))
            test_writer.add_summary(summary, i)
            print("Step: " + str(i) + ", acc: " + str(acc))
        else:
            summary,_ = sess.run([merged,train_step], feed_dict=get_dict(True))
            train_writer.add_summary(summary,i)
        
    train_writer.close()
    test_writer.close()
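After the script has run, the summaries can be viewed by starting TensorBoard from a terminal with tensorboard --logdir=Logs/log-6.1 and opening http://localhost:6006 (TensorBoard's default port) in a browser; the training and test curves are written to the /train and /test subdirectories created above.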