Morvan Tutorial Notes 2019.9.15: Convolutional Neural Network (CNN)

General structure of a Convolutional Neural Network (CNN)

From bottom to top:
[Figure from the original post: the general CNN stack, input image → convolution → pooling → convolution → pooling → fully connected → output]

tf.nn.max_pool(x, ksize, strides, padding)
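A quick toy check of my own (not from the tutorial) showing what a 2x2 max pool with stride 2 does to a 4x4 input; the values and shapes are only illustrative:

import tensorflow as tf
import numpy as np

# one 4x4 single-channel "image" with values 0..15
x = tf.constant(np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1))
pooled = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
with tf.Session() as s:
    print(s.run(pooled).reshape(2, 2))
    # [[ 5.  7.]
    #  [13. 15.]]  -> each 2x2 block is replaced by its maximum, so the spatial size halves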

Original post: https://blog.csdn.net/m0_37586991/article/details/84575325

MorvanTest18_mnist-CNN

Problems encountered and solved in this program:

1. Downloading the MNIST dataset:
A successful download prints:
Extracting MNIST_data\train-images-idx3-ubyte.gz
Extracting MNIST_data\train-labels-idx1-ubyte.gz
Extracting MNIST_data\t10k-images-idx3-ubyte.gz
Extracting MNIST_data\t10k-labels-idx1-ubyte.gz

2. ValueError: None values not supported
Cause: the function that builds a layer had no return statement, so it returned None.
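A minimal reproduction I added (hypothetical names, not from the tutorial) of how this error arises: the layer function builds the op but never returns it, so the caller receives None and any op that consumes it raises the ValueError.

import tensorflow as tf

def broken_layer(inputs, in_size, out_size):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    tf.matmul(inputs, Weights)   # result is built but never returned -> function returns None

x = tf.placeholder(tf.float32, [None, 784])
out = broken_layer(x, 784, 10)   # out is None
# tf.nn.softmax(out)             # -> ValueError: None values not supported
# Adding `return tf.matmul(inputs, Weights)` fixes it.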

Classification problem

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

Download the MNIST dataset

# handwritten digits 0 to 9, labels one-hot encoded
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
WARNING: Logging before flag parsing goes to stderr.
W0915 16:55:46.437091 11960 deprecation.py:323] From <ipython-input-2-8449282bccfa>:2: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
W0915 16:55:46.470529 11960 deprecation.py:323] From C:\Users\24301\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
W0915 16:55:46.475519 11960 deprecation.py:323] From C:\Users\24301\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.


Extracting MNIST_data\train-images-idx3-ubyte.gz


W0915 16:55:46.815388 11960 deprecation.py:323] From C:\Users\24301\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
W0915 16:55:46.822374 11960 deprecation.py:323] From C:\Users\24301\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
W0915 16:55:46.912207 11960 deprecation.py:323] From C:\Users\24301\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.


Extracting MNIST_data\train-labels-idx1-ubyte.gz
Extracting MNIST_data\t10k-images-idx3-ubyte.gz
Extracting MNIST_data\t10k-labels-idx1-ubyte.gz
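A quick look I added at what was loaded; the shapes below reflect the standard read_data_sets split (55,000 train / 5,000 validation / 10,000 test), with images flattened to 784 pixels and one-hot labels:

print(mnist.train.images.shape)   # (55000, 784)
print(mnist.train.labels.shape)   # (55000, 10)
print(mnist.test.images.shape)    # (10000, 784)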

Define a function that adds a neural layer (kept from the earlier plain-network tutorial; it is disabled here by wrapping it in a string, since the CNN below builds its layers explicitly)

#add one more layer and return the output of this layer
'''
def add_layer(inputs , in_size , out_size , activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size , out_size]))
    biases = tf.Variable(tf.zeros([1 , out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs , Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
'''

Define helper functions

# function that evaluates accuracy on a given set of images and labels
def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result
# weight helper
def weight_variable(shape):
    # tf.truncated_normal(shape, mean, stddev): stddev is the standard deviation;
    # the weights are initialized with small random values
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
# bias helper
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)  # constant initial value of 0.1
    return tf.Variable(initial)
# convolution layer helper
def conv2d(x, W):
    # strides = [1, x_movement, y_movement, 1] defines the step size
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
# pooling helper
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

Define the input placeholders

# define placeholder for inputs to network
# each image has 28*28 = 784 pixels
xs = tf.placeholder(tf.float32, [None, 784])
# each image has 10 output classes (digits 0-9)
ys = tf.placeholder(tf.float32, [None, 10])

keep_prob = tf.placeholder(tf.float32)

x_image = tf.reshape(xs, [-1, 28, 28, 1])
# print(x_image.shape)  # [n_samples, 28, 28, 1]
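A small check I added: the -1 in the reshape lets TensorFlow infer the batch dimension from whatever is fed in, so a batch of 5 flattened images comes out as (5, 28, 28, 1). Since x_image depends only on the xs placeholder, it can be run in a throwaway session:

with tf.Session() as check_sess:
    batch = mnist.train.images[:5]                                # shape (5, 784)
    print(check_sess.run(x_image, feed_dict={xs: batch}).shape)   # (5, 28, 28, 1)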

Build the network layers

# add output layer
# prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)
## conv1 layer ##
W_conv1 = weight_variable([5, 5, 1, 32])   # patch: 5x5, in channels (MNIST is grayscale): 1, out channels (filters): 32
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)   # out_size = 28x28x32
h_pool1 = max_pool_2x2(h_conv1)                            # out_size = 14x14x32

## conv2 layer ##
W_conv2 = weight_variable([5, 5, 32, 64])  # patch: 5x5, in channels: 32, out channels (filters): 64
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)   # out_size = 14x14x64
h_pool2 = max_pool_2x2(h_conv2)                            # out_size = 7x7x64

## func1 layer ##
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])           # flatten
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

## func2 layer ##
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

W0915 16:55:47.821513 11960 deprecation.py:506] From <ipython-input-11-72a07212c759>:18: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.
Instructions for updating:
Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.
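
An optional sanity check I added: the static shapes recorded in the comments above can be read directly off the graph with get_shape(), before any session is run.

print(h_conv1.get_shape())      # (?, 28, 28, 32)
print(h_pool1.get_shape())      # (?, 14, 14, 32)
print(h_pool2.get_shape())      # (?, 7, 7, 64)
print(prediction.get_shape())   # (?, 10)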

Compute the loss; softmax + cross-entropy is the usual choice for classification

# the error between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))   # loss
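A hedged alternative, not from the original post: tf.log(prediction) can produce NaN if a softmax probability underflows to 0, so the fused op is often preferred. The sketch below assumes we also keep the raw logits (the same values fed to the softmax above):

logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2   # pre-softmax values, recomputed here for illustration
cross_entropy_stable = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=ys, logits=logits))
# train_step could then minimize cross_entropy_stable instead of cross_entropy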

Training method

#train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

Create a session

sess = tf.Session()

Initialize the variables

sess.run(tf.global_variables_initializer())

Train, drawing a batch of 100 samples at each step to save time

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    # note: feeding keep_prob=1 here effectively disables dropout during training;
    # the tutorial's original code feeds keep_prob=0.5 at this step
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 1})
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
0.0831
0.8059
0.891
0.9209
0.9333
0.9377
0.9451
0.9484
0.9486
0.9549
0.9527
0.9633
0.9563
0.9626
0.9648
0.9695
0.9624



---------------------------------------------------------------------------

KeyboardInterrupt                         Traceback (most recent call last)

<ipython-input-16-48c30f2dad48> in <module>
      3     sess.run(train_step , feed_dict={xs:batch_xs , ys:batch_ys , keep_prob:1})
      4     if i%50 == 0:
----> 5         print(compute_accuracy(mnist.test.images , mnist.test.labels))

<ipython-input-4-7d7d3d33cf47> in compute_accuracy(v_xs, v_ys)
      2 def compute_accuracy(v_xs , v_ys):
      3     global prediction
----> 4     y_pre = sess.run(prediction , feed_dict={xs:v_xs , keep_prob:1})

[internal tensorflow\python\client\session.py frames omitted]

KeyboardInterrupt: 
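
The run above was stopped manually; the interrupt landed inside compute_accuracy, where all 10,000 test images go through the network in a single sess.run after every 50 steps, which takes a while. A sketch I added (function and variable names are my own) that evaluates the test set in chunks instead:

def compute_accuracy_batched(images, labels, batch_size=1000):
    total_correct = 0
    for start in range(0, len(images), batch_size):
        xb = images[start:start + batch_size]
        yb = labels[start:start + batch_size]
        y_pre = sess.run(prediction, feed_dict={xs: xb, keep_prob: 1})
        total_correct += (y_pre.argmax(1) == yb.argmax(1)).sum()
    return total_correct / len(images)

# print(compute_accuracy_batched(mnist.test.images, mnist.test.labels))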