TensorFlow Convolutional Network Model

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
print ("packs loaded")
packs loaded
tf.compat.v1.disable_eager_execution()
mnist = input_data.read_data_sets('data/', one_hot=True)

trainimg   = mnist.train.images
trainlabel = mnist.train.labels
testimg    = mnist.test.images
testlabel  = mnist.test.labels

print ("MNIST ready")
WARNING:tensorflow:From <ipython-input-4-3a92e645b733>:1: read_data_sets (from tensorflow.examples.tutorials.mnist.input_data) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as: tensorflow_datasets.load('mnist')
WARNING:tensorflow:From d:\progra~2\python\virtua~1\py37_x64\lib\site-packages\tensorflow_core\examples\tutorials\mnist\input_data.py:297: _maybe_download (from tensorflow.examples.tutorials.mnist.input_data) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From d:\progra~2\python\virtua~1\py37_x64\lib\site-packages\tensorflow_core\examples\tutorials\mnist\input_data.py:299: _extract_images (from tensorflow.examples.tutorials.mnist.input_data) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting data/train-images-idx3-ubyte.gz
WARNING:tensorflow:From d:\progra~2\python\virtua~1\py37_x64\lib\site-packages\tensorflow_core\examples\tutorials\mnist\input_data.py:304: _extract_labels (from tensorflow.examples.tutorials.mnist.input_data) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting data/train-labels-idx1-ubyte.gz
WARNING:tensorflow:From d:\progra~2\python\virtua~1\py37_x64\lib\site-packages\tensorflow_core\examples\tutorials\mnist\input_data.py:112: _dense_to_one_hot (from tensorflow.examples.tutorials.mnist.input_data) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Extracting data/t10k-images-idx3-ubyte.gz
Extracting data/t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From d:\progra~2\python\virtua~1\py37_x64\lib\site-packages\tensorflow_core\examples\tutorials\mnist\input_data.py:328: _DataSet.__init__ (from tensorflow.examples.tutorials.mnist.input_data) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/_DataSet.py from tensorflow/models.
MNIST ready
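
The input_data reader used above still works but is deprecated, as the warnings show. A minimal sketch of a modern replacement, assuming tf.keras.datasets is available (the rest of this post keeps the old reader so the code matches the logs):

# Sketch: load MNIST without the deprecated input_data reader.
import numpy as np
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Match the shapes used below: flatten to 784 floats in [0, 1], one-hot labels.
trainimg   = x_train.reshape(-1, 784).astype(np.float32) / 255.0
testimg    = x_test.reshape(-1, 784).astype(np.float32) / 255.0
trainlabel = np.eye(10, dtype=np.float32)[y_train]
testlabel  = np.eye(10, dtype=np.float32)[y_test]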

Convolutional Network Architecture

[Figure: convolutional network architecture]

n_input  = 784
n_output = 10
weights  = {
        'wc1': tf.Variable(tf.random.normal([3, 3, 1, 64], stddev=0.1)),
        'wc2': tf.Variable(tf.random.normal([3, 3, 64, 128], stddev=0.1)),
        'wd1': tf.Variable(tf.random.normal([7*7*128, 1024], stddev=0.1)),
        'wd2': tf.Variable(tf.random.normal([1024, n_output], stddev=0.1))
    }
biases   = {
        'bc1': tf.Variable(tf.random.normal([64], stddev=0.1)),
        'bc2': tf.Variable(tf.random.normal([128], stddev=0.1)),
        'bd1': tf.Variable(tf.random.normal([1024], stddev=0.1)),
        'bd2': tf.Variable(tf.random.normal([n_output], stddev=0.1))
    }
WARNING:tensorflow:From d:\progra~2\python\virtua~1\py37_x64\lib\site-packages\tensorflow_core\python\ops\resource_variable_ops.py:1635: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.
Instructions for updating:
If using Keras pass *_constraint arguments to layers.
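
Why is 'wd1' shaped [7*7*128, 1024]? Each of the two SAME-padded 2×2, stride-2 max-pools below halves the spatial size (rounding up), so the 28×28 input shrinks to 14×14 and then 7×7, while the second convolution outputs 128 channels. A quick sanity check:

# Sanity check for the 'wd1' input size: SAME pooling computes ceil(side/2).
side = 28
for _ in range(2):          # two pooling layers
    side = (side + 1) // 2  # 28 -> 14 -> 7
print(side * side * 128)    # 6272 == 7*7*128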
def conv_basic(_input, _w, _b, _keepratio):
        # INPUT
        _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1])
        
        # CONV LAYER 1
        # Convolution
        _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')
        #_mean, _var = tf.nn.moments(_conv1, [0, 1, 2])
        #_conv1 = tf.nn.batch_normalization(_conv1, _mean, _var, 0, 1, 0.0001)
        # Activation
        _conv1 = tf.nn.relu(tf.nn.bias_add(_conv1, _b['bc1']))
        # Max pooling
        _pool1 = tf.nn.max_pool(_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        # Randomly drop some activations; _keepratio is the keep probability.
        # Note: TF 2.x's tf.nn.dropout takes a drop *rate*, so convert it.
        _pool_dr1 = tf.nn.dropout(_pool1, rate=1 - _keepratio)
        
        # CONV LAYER 2
        _conv2 = tf.nn.conv2d(_pool_dr1, _w['wc2'], strides=[1, 1, 1, 1], padding='SAME')
        #_mean, _var = tf.nn.moments(_conv2, [0, 1, 2])
        #_conv2 = tf.nn.batch_normalization(_conv2, _mean, _var, 0, 1, 0.0001)
        _conv2 = tf.nn.relu(tf.nn.bias_add(_conv2, _b['bc2']))
        _pool2 = tf.nn.max_pool(_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        _pool_dr2 = tf.nn.dropout(_pool2, rate=1 - _keepratio)
        
        # VECTORIZE
        _dense1 = tf.reshape(_pool_dr2, [-1, _w['wd1'].get_shape().as_list()[0]])
        # FULLY CONNECTED LAYER 1
        _fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
        _fc_dr1 = tf.nn.dropout(_fc1, rate=1 - _keepratio)
        
        # FULLY CONNECTED LAYER 2
        _out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
        
        # RETURN
        out = { 
            'input_r': _input_r,
            'conv1': _conv1, 'pool1': _pool1, 'pool_dr1': _pool_dr1,
            'conv2': _conv2, 'pool2': _pool2, 'pool_dr2': _pool_dr2,
            'dense1': _dense1,
            'fc1': _fc1, 'fc_dr1': _fc_dr1,
            'out': _out
        }
        return out
print ("CNN READY")
CNN READY
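
Before wiring up the loss, it helps to confirm the shape flow. A small sketch that builds the graph with throwaway placeholders and prints the static shape of each stage:

# Optional shape check (sketch): trace the static shapes through conv_basic.
_x  = tf.compat.v1.placeholder(tf.float32, [None, n_input])
_kr = tf.compat.v1.placeholder(tf.float32)
_nodes = conv_basic(_x, weights, biases, _kr)
for name in ['input_r', 'pool1', 'pool2', 'dense1', 'out']:
    print(name, _nodes[name].get_shape().as_list())
# Expected: [None, 28, 28, 1], [None, 14, 14, 64], [None, 7, 7, 128],
#           [None, 6272], [None, 10]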

AttributeError: module 'tensorflow' has no attribute 'Print'

Under TF 2.x, tf.Print has moved into the compat module; replace the first form with the second:

  • tf.Print(a, [a], "a: ")
  • tf.compat.v1.Print(a, [a], "a: ")
Test case

a = tf.Variable(tf.random.normal([3, 3, 1, 64], stddev=0.1))
print (a)

init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)

# a = tf.Print(a, [a], "a: ")
a = tf.compat.v1.Print(a, [a], "a: ")

sess.run(a)
#print (help(tf.nn.conv2d))
# print (help(tf.nn.max_pool))
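
tf.compat.v1.Print works, but it is itself deprecated in favor of tf.print. In graph mode tf.print returns an op that must be run explicitly; a sketch using the session from above:

# TF 2.x replacement (sketch): under graph mode tf.print returns an op.
print_op = tf.print("a:", a)
sess.run(print_op)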

AttributeError: module 'tensorflow_core._api.v2.train' has no attribute 'AdamOptimizer'

Likewise, AdamOptimizer now lives under tf.compat.v1.train; replace the first form with the second:

  • tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
  • tf.compat.v1.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
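
For reference, the native TF 2.x style is tf.keras.optimizers.Adam with a GradientTape, which requires eager mode; this post stays in graph mode, so the compat form is used below. A standalone eager-mode sketch (run it in a fresh script, since eager execution is disabled here):

# Hypothetical eager-mode equivalent: one Adam step on a tiny variable.
import tensorflow as tf

v = tf.Variable([1.0, 2.0])
opt = tf.keras.optimizers.Adam(learning_rate=0.001)
with tf.GradientTape() as tape:
    loss = tf.reduce_sum(v * v)
opt.apply_gradients(zip(tape.gradient(loss, [v]), [v]))
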
x = tf.compat.v1.placeholder(tf.float32, [None, n_input])
y = tf.compat.v1.placeholder(tf.float32, [None, n_output])
keepratio = tf.compat.v1.placeholder(tf.float32)

# FUNCTIONS
_pred = conv_basic(x, weights, biases, keepratio)['out']
# Pass labels/logits by keyword; the positional order changed across TF versions.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=_pred, labels=y))

# optm = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
optm = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

_corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) 
accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) 

    
# SAVER
print ("GRAPH READY")
GRAPH READY
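
The "# SAVER" comment above was left empty; a minimal checkpointing sketch using tf.compat.v1.train.Saver (the path is a made-up example):

# Minimal checkpointing sketch for the '# SAVER' placeholder.
saver = tf.compat.v1.train.Saver()
# After training: saver.save(sess, 'nets/cnn_mnist.ckpt')    # hypothetical path
# To restore:     saver.restore(sess, 'nets/cnn_mnist.ckpt')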
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)

training_epochs = 15
batch_size      = 100
display_step    = 5

for epoch in range(training_epochs):
    avg_cost = 0.
    # total_batch = int(mnist.train.num_examples/batch_size)
    total_batch = 10  # Training on the full set is slow, so cap each epoch at 10 batches
    # Loop over all batches
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        # Fit training using batch data
        sess.run(optm, feed_dict={x: batch_xs, y: batch_ys, keepratio:0.7})
        
        # Compute average loss
        avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keepratio:1.})/total_batch

    # Display logs per epoch step
    if epoch % display_step == 0: 
        print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
        
        train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys, keepratio:1.})
        print (" Training accuracy: %.3f" % (train_acc))
        
        test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel, keepratio:1.})
        print (" Test accuracy: %.3f" % (test_acc))

print ("OPTIMIZATION FINISHED")
Epoch: 000/015 cost: nan
 Training accuracy: 0.100
 Test accuracy: 0.098
Epoch: 005/015 cost: nan
 Training accuracy: 0.100
 Test accuracy: 0.098
Epoch: 010/015 cost: nan
 Training accuracy: 0.080
 Test accuracy: 0.098
OPTIMIZATION FINISHED
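
The nan cost and chance-level accuracy (~0.098, i.e. guessing one of ten classes) in this log are consistent with the original dropout calls: in TF 2.x the second argument of tf.nn.dropout is a drop rate, not a keep probability, so evaluating with keepratio=1.0 makes the inverted-dropout scale 1/(1-rate) infinite and turns every activation into NaN. A small sketch reproducing the failure mode (the rate must be a tensor to bypass the static range check):

# Reproducing the nan (sketch): rate=1.0 as a tensor yields inf * 0 = nan.
bad = tf.nn.dropout(tf.ones([2, 2]), rate=tf.constant(1.0))
print(sess.run(bad))   # all nan

With the corrected rate=1 - _keepratio calls in conv_basic (and the keyword labels=/logits= arguments in the cost), the cost is finite and accuracy climbs well above chance.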

 Test accuracy: 0.098
Epoch: 005/015 cost: nan
 Training accuracy: 0.100
 Test accuracy: 0.098
Epoch: 010/015 cost: nan
 Training accuracy: 0.080
 Test accuracy: 0.098
OPTIMIZATION FINISHED