【TensorFlow】#181114

Neural Networks



Gradient Descent in Neural Nets

Optimization methods

Newton's method
Least squares method
Gradient descent

Cost = (predicted - real)^2
     = (W*x - y)^2
     = (W - 0)^2

For a fixed data point (x, y) the cost is a parabola in the weight W, so it can be minimized by repeatedly stepping W against the gradient dCost/dW.
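
A minimal numeric sketch of that update rule (my own illustration, not from the original notes; the sample values and learning rate are arbitrary):

import numpy as np

# hand-rolled gradient descent on Cost = (W*x - y)^2 for a single data point
x, y = 1.0, 0.3          # one sample
W = -0.5                 # bad initial guess
lr = 0.1                 # learning rate

for step in range(50):
    grad = 2 * x * (W * x - y)   # dCost/dW
    W -= lr * grad               # step against the gradient
    if step % 10 == 0:
        print(step, W, (W * x - y) ** 2)
# W converges toward 0.3 and the cost toward 0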

The neural network "black box" is not so black (Deep Understanding of Neural Nets)


Example

import tensorflow as tf
import numpy as np

x_data =np.random.rand(100).astype(np.float32)
y_data =x_data *0.1 +0.3

Weight =tf.Variable(tf.random_uniform([1],-1.0,1.0))
biases =tf.Variable(tf.zeros([1]))

y =Weight *x_data +biases

loss =tf.reduce_mean(tf.square(y -y_data))
optimizer =tf.train.GradientDescentOptimizer(0.5)
train =optimizer.minimize(loss)

init =tf.initialize_all_variables()

sess =tf.Session()
sess.run(init)

for step in range(201):
	sess.run(train)
	if step % 20 ==0:
		print(step,sess.run(Weight),sess.run(biases))
		
D:\Test>python test.py
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
2018-11-26 12:56:16.025459: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
0 [-0.303385] [0.6924046]
20 [-0.0240099] [0.36456507]
40 [0.06783367] [0.31674722]
60 [0.09165653] [0.304344]
80 [0.09783582] [0.30112678]
100 [0.09943865] [0.30029228]
120 [0.0998544] [0.30007583]
140 [0.09996223] [0.30001968]
160 [0.0999902] [0.3000051]
180 [0.09999748] [0.30000132]
200 [0.09999935] [0.30000034]
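
A small aside (my own note, not from the original run): sess.run also accepts a list of fetches, so the two separate calls in the loop above can be merged into one:

for step in range(201):
    sess.run(train)
    if step % 20 == 0:
        # fetch both variables with a single run call
        w, b = sess.run([Weight, biases])
        print(step, w, b)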

Session control

import tensorflow as tf

matrix1 =tf.constant([[3,3]])
matrix2 =tf.constant([[2],[2]])

product =tf.matmul(matrix1,matrix2)

sess =tf.Session()
result =sess.run(product)
print(result)
sess.close()

D:\Test>python test.py
2018-11-26 13:12:33.440085: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
[[12]]

The same computation, using a with block so the session is closed automatically:

import tensorflow as tf

matrix1 =tf.constant([[3,3]])
matrix2 =tf.constant([[2],[2]])

product =tf.matmul(matrix1,matrix2)

with tf.Session() as sess:
    result2 =sess.run(product)
    print(result2)

D:\Test>python test.py
2018-11-26 13:14:13.044102: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
[[12]]

Variables

import tensorflow as tf

state =tf.Variable(0,name ='counter')

print(state.name)

D:\Test>python test.py
counter:0

Using the variable in a graph, with an op that adds one and assigns the result back:

import tensorflow as tf

state =tf.Variable(0,name ='counter')

one =tf.constant(1)

new_value =tf.add(state,one)
update =tf.assign(state,new_value)

init =tf.initialize_all_variables()

with tf.Session() as sess:
    sess.run(init)
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))

D:\Test>python test.py
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
2018-11-26 13:19:57.721024: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
1
2
3

placeholder: feeding values at run time

import tensorflow as tf

input1 =tf.placeholder(tf.float32)
input2 =tf.placeholder(tf.float32)

output =tf.multiply(input1,input2)

with tf.Session() as sess:
    print(sess.run(output,feed_dict ={input1:[7.],input2:[2.]}))

D:\Test>python test.py
2018-11-26 13:24:54.965202: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
[14.]
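
A small extension (my own example, not from the original notes): a placeholder can also carry a shape, and the feed then has to match it.

import tensorflow as tf

input1 = tf.placeholder(tf.float32, [None, 2])   # any number of rows, 2 columns
input2 = tf.placeholder(tf.float32, [2, 1])
output = tf.matmul(input1, input2)

with tf.Session() as sess:
    print(sess.run(output, feed_dict={input1: [[1., 2.], [3., 4.]],
                                      input2: [[10.], [20.]]}))
    # [[ 50.]
    #  [110.]]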

Activation functions

https://www.tensorflow.org/api_guides/python/nn
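
A tiny sketch (my own example; the input values are arbitrary) applying a few of the activation ops from that page to the same tensor:

import tensorflow as tf

x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])

with tf.Session() as sess:
    print(sess.run(tf.nn.relu(x)))      # negatives clipped to 0
    print(sess.run(tf.nn.sigmoid(x)))   # squashed into (0, 1)
    print(sess.run(tf.nn.tanh(x)))      # squashed into (-1, 1)
    print(sess.run(tf.nn.softplus(x)))  # smooth approximation of relu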

Adding a layer: def add_layer()

import tensorflow as tf

def add_layer(inputs,in_size,out_size,activation_function =None):
    Weights =tf.Variable(tf.random_normal([in_size,out_size]))
    biases =tf.Variable(tf.zeros([1,out_size]) +0.1)
    Wx_plus_b =tf.matmul(inputs,Weights) +biases
    if activation_function is None:
        outputs =Wx_plus_b
    else:
        outputs =activation_function(Wx_plus_b)
    return outputs

Building a neural network

from __future__ import print_function
import tensorflow as tf
import numpy as np

def add_layer(inputs,in_size,out_size,activation_function =None):
    Weights =tf.Variable(tf.random_normal([in_size,out_size]))
    biases =tf.Variable(tf.zeros([1,out_size]) +0.1)
    Wx_plus_b =tf.matmul(inputs,Weights) +biases
    if activation_function is None:
        outputs =Wx_plus_b
    else:
        outputs =activation_function(Wx_plus_b)
    return outputs

x_data =np.linspace(-1,1,300)[:,np.newaxis]
noise =np.random.normal(0,0.05,x_data.shape)
y_data =np.square(x_data) -0.5 + noise

xs =tf.placeholder(tf.float32,[None,1])
ys =tf.placeholder(tf.float32,[None,1])

l1 =add_layer(xs,1,10,activation_function =tf.nn.relu)
prediction =add_layer(l1,10,1,activation_function =None)

loss =tf.reduce_mean(tf.reduce_sum(tf.square(ys -prediction),reduction_indices =[1]))

train_step =tf.train.GradientDescentOptimizer(0.1).minimize(loss)

if int((tf.__version__).split('.')[1]) <12:
    init =tf.initialize_all_variables()
else:
    init =tf.global_variables_initializer()
sess =tf.Session()
sess.run(init)

for i in range(1000):
    sess.run(train_step,feed_dict ={xs:x_data,ys:y_data})
    if i % 50 ==0:
        print(sess.run(loss,feed_dict ={xs:x_data,ys:y_data}))

D:\Test>python test.py
2018-11-26 14:14:00.858152: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
0.28190356
0.011074964
0.008154809
0.0064126835
0.0055707386
0.0051101055
0.00479422
0.004595898
0.004425618
0.0042744284
0.004117039
0.0039718887
0.0038343314
0.003696498
0.0035931754
0.003494666
0.0034033188
0.003327237
0.003258068
0.0031993748

Visualizing the result (plot result)

from __future__ import print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

def add_layer(inputs,in_size,out_size,activation_function =None):
    Weights =tf.Variable(tf.random_normal([in_size,out_size]))
    biases =tf.Variable(tf.zeros([1,out_size]) +0.1)
    Wx_plus_b =tf.matmul(inputs,Weights) +biases
    if activation_function is None:
        outputs =Wx_plus_b
    else:
        outputs =activation_function(Wx_plus_b)
    return outputs

x_data =np.linspace(-1,1,300)[:,np.newaxis]
noise =np.random.normal(0,0.05,x_data.shape)
y_data =np.square(x_data) -0.5 + noise

xs =tf.placeholder(tf.float32,[None,1])
ys =tf.placeholder(tf.float32,[None,1])

l1 =add_layer(xs,1,10,activation_function =tf.nn.relu)
prediction =add_layer(l1,10,1,activation_function =None)

loss =tf.reduce_mean(tf.reduce_sum(tf.square(ys -prediction),reduction_indices =[1]))

train_step =tf.train.GradientDescentOptimizer(0.1).minimize(loss)

if int((tf.__version__).split('.')[1]) <12:
    init =tf.initialize_all_variables()
else:
    init =tf.global_variables_initializer()
sess =tf.Session()
sess.run(init)

fig =plt.figure()
ax =fig.add_subplot(1,1,1)
ax.scatter(x_data,y_data)
plt.ion()
plt.show()

for i in range(1000):
    sess.run(train_step,feed_dict ={xs:x_data,ys:y_data})
    if i % 50 ==0:
        try:
            ax.lines.remove(lines[0])
        except Exception:
            pass
        prediction_value =sess.run(prediction,feed_dict ={xs:x_data})
        lines =ax.plot(x_data,prediction_value,'r-',lw =5)
        plt.pause(0.1)

Optimizers: speeding up neural network training (deep learning)

Stochastic Gradient Descent (SGD)
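
A minimal sketch (my own addition, not from the original notes): in TF 1.x the optimizer is a single line, so the plain GradientDescentOptimizer used elsewhere in these notes can be swapped for an accelerated variant; the learning rates below are arbitrary.

import tensorflow as tf

W = tf.Variable(0.0)
loss = tf.square(W - 3.0)   # toy quadratic loss with its minimum at W = 3

# each call builds a training op that sess.run() can step
train_sgd      = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
train_momentum = tf.train.MomentumOptimizer(0.1, momentum=0.9).minimize(loss)
train_rmsprop  = tf.train.RMSPropOptimizer(0.1).minimize(loss)
train_adam     = tf.train.AdamOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
        sess.run(train_adam)   # pick one train op and step it
    print(sess.run(W))         # close to 3.0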

TensorBoard visualization

import tensorflow as tf

def add_layer(inputs,in_size,out_size,activation_function =None):
    with tf.name_scope('layer'):
        with tf.name_scope('weights'):
            Weights =tf.Variable(tf.random_normal([in_size,out_size]))
        with tf.name_scope('biases'):
            biases =tf.Variable(tf.zeros([1,out_size]) +0.1)
        with tf.name_scope('Wx_p'):
            Wx_plus_b =tf.matmul(inputs,Weights) +biases
        if activation_function is None:
            outputs =Wx_plus_b
        else:
            outputs =activation_function(Wx_plus_b,)
        return outputs

with tf.name_scope('input'):
    xs =tf.placeholder(tf.float32,[None,1],name ='x_input')
    ys =tf.placeholder(tf.float32,[None,1],name ='y_input')

l1 =add_layer(xs,1,10,activation_function =tf.nn.relu)
prediction =add_layer(l1,10,1,activation_function =None)

with tf.name_scope('loss'):
    loss =tf.reduce_mean(tf.reduce_sum(tf.square(ys -prediction),reduction_indices =[1]))

with tf.name_scope('train'):
    train_step =tf.train.GradientDescentOptimizer(0.1).minimize(loss)

sess =tf.Session()
writer =tf.summary.FileWriter("logs/",sess.graph)
sess.run(tf.initialize_all_variables())

D:\Test>python test.py
2018-11-26 14:56:41.655404: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.

D:\Test>tensorboard --logdir='logs/'
TensorBoard 1.12.0 at http://DESKTOP-AI0CBU4:6006 (Press CTRL+C to quit)

Extending the graph with histogram and scalar summaries so the training curves show up in TensorBoard:

import tensorflow as tf
import numpy as np

def add_layer(inputs,in_size,out_size,n_layer,activation_function =None):
    layer_name ='layer%s' % n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            Weights =tf.Variable(tf.random_normal([in_size,out_size]),name ='W')
            tf.summary.histogram(layer_name +'/weights',Weights)
        with tf.name_scope('biases'):
            biases =tf.Variable(tf.zeros([1,out_size]) +0.1,name ='b')
            tf.summary.histogram(layer_name +'/biases',biases)
        with tf.name_scope('Wx_p'):
            Wx_plus_b =tf.matmul(inputs,Weights) +biases
        if activation_function is None:
            outputs =Wx_plus_b
        else:
            outputs =activation_function(Wx_plus_b,)
        tf.summary.histogram(layer_name +'/outputs',outputs)
        return outputs

x_data =np.linspace(-1,1,300)[:,np.newaxis]
noise =np.random.normal(0,0.05,x_data.shape)
y_data =np.square(x_data) -0.5 + noise

with tf.name_scope('input'):
    xs =tf.placeholder(tf.float32,[None,1],name ='x_input')
    ys =tf.placeholder(tf.float32,[None,1],name ='y_input')

l1 =add_layer(xs,1,10,n_layer =1,activation_function =tf.nn.relu)
prediction =add_layer(l1,10,1,n_layer =2,activation_function =None)

with tf.name_scope('loss'):
    loss =tf.reduce_mean(tf.reduce_sum(tf.square(ys -prediction),reduction_indices =[1]))
    tf.summary.scalar('loss',loss)

with tf.name_scope('train'):
    train_step =tf.train.GradientDescentOptimizer(0.1).minimize(loss)

sess =tf.Session()
merged =tf.summary.merge_all()
writer =tf.summary.FileWriter("logs/",sess.graph)
sess.run(tf.initialize_all_variables())

for i in range(1000):
    sess.run(train_step,feed_dict ={xs:x_data,ys:y_data})
    if i % 50 == 0:
        result =sess.run(merged,feed_dict ={xs:x_data,ys:y_data})
        writer.add_summary(result,i)

D:\Test>python test.py
2018-11-26 15:20:09.031359: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.

D:\Test>tensorboard --logdir='logs/'
TensorBoard 1.12.0 at http://DESKTOP-AI0CBU4:6006 (Press CTRL+C to quit)

Classification

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist =input_data.read_data_sets('MNIST_data',one_hot =True)

def add_layer(inputs,in_size,out_size,activation_function =None):
    Weights =tf.Variable(tf.random_normal([in_size,out_size]))
    biases =tf.Variable(tf.zeros([1,out_size]) +0.1)
    Wx_plus_b =tf.matmul(inputs,Weights) +biases
    if activation_function is None:
        outputs =Wx_plus_b
    else:
        outputs =activation_function(Wx_plus_b,)
    return outputs

xs =tf.placeholder(tf.float32,[None,784])
ys =tf.placeholder(tf.float32,[None,10])

prediction =add_layer(xs,784,10,activation_function =tf.nn.softmax)

cross_entropy =tf.reduce_mean(-tf.reduce_sum(ys *tf.log(prediction),reduction_indices =[1]))

train_step =tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess =tf.Session()
sess.run(tf.initialize_all_variables())

def compute_accuracy(v_xs,v_ys):
    global prediction
    y_pre =sess.run(prediction,feed_dict ={xs:v_xs})
    correct_prediction =tf.equal(tf.argmax(y_pre,1),tf.argmax(v_ys,1))
    accuracy =tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    result =sess.run(accuracy,feed_dict ={xs:v_xs,ys:v_ys})
    return result


for i in range(1000):
    batch_xs,batch_ys =mnist.train.next_batch(100)
    sess.run(train_step,feed_dict ={xs:batch_xs,ys:batch_ys})
    if i % 50 ==0:
        print(compute_accuracy(mnist.test.images,mnist.test.labels))

D:\Test>python test.py
WARNING:tensorflow:From test.py:4: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data\train-images-idx3-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data\train-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Extracting MNIST_data\t10k-images-idx3-ubyte.gz
Extracting MNIST_data\t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
2018-11-26 16:42:05.884246: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
0.0973
0.6307
0.7385
0.7778
0.8023
0.8144
0.8286
0.8369
0.8413
0.8424
0.8538
0.8573
0.8582
0.863
0.8664
0.8664
0.8686
0.8683
0.8695
0.8719

Overfitting (deep learning)



Using dropout to solve the overfitting problem

import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

digits =load_digits()
X =digits.data
y =digits.target
y =LabelBinarizer().fit_transform(y)
X_train,X_test,y_train,y_test =train_test_split(X,y,test_size =.3)


def add_layer(inputs,in_size,out_size,layer_name,activation_function =None,):
    Weights =tf.Variable(tf.random_normal([in_size,out_size]))
    biases =tf.Variable(tf.zeros([1,out_size]) +0.1)
    Wx_plus_b =tf.matmul(inputs,Weights) +biases
    if activation_function is None:
        outputs =Wx_plus_b
    else:
        outputs =activation_function(Wx_plus_b,)
    tf.summary.histogram(layer_name +'/outputs',outputs)
    return outputs

xs =tf.placeholder(tf.float32,[None,64])
ys =tf.placeholder(tf.float32,[None,10])

l1 =add_layer(xs,64,100,'l1',activation_function =tf.nn.tanh)
prediction =add_layer(l1,100,10,'l2',activation_function =tf.nn.softmax)

cross_entropy =tf.reduce_mean(-tf.reduce_sum(ys *tf.log(prediction),reduction_indices =[1]))

tf.summary.scalar('loss',cross_entropy)

train_step =tf.train.GradientDescentOptimizer(0.6).minimize(cross_entropy)

sess =tf.Session()
merged =tf.summary.merge_all()

train_writer =tf.summary.FileWriter("logs/train",sess.graph)
test_writer =tf.summary.FileWriter("logs/test",sess.graph)

sess.run(tf.initialize_all_variables())

for i in range(1000):
    sess.run(train_step,feed_dict ={xs:X_train,ys:y_train})
    if i % 50 ==0:
        train_result =sess.run(merged,feed_dict ={xs:X_train,ys:y_train})
        test_result =sess.run(merged,feed_dict ={xs:X_test,ys:y_test})
        train_writer.add_summary(train_result,i)
        test_writer.add_summary(test_result,i)

D:\Test>python test.py
2018-11-26 17:23:40.730750: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.

D:\Test>tensorboard --logdir=logs
TensorBoard 1.12.0 at http://DESKTOP-AI0CBU4:6006 (Press CTRL+C to quit)

Without dropout the test loss stays well above the training loss (large error, i.e. overfitting). Adding dropout to each layer:
import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

digits =load_digits()
X =digits.data
y =digits.target
y =LabelBinarizer().fit_transform(y)
X_train,X_test,y_train,y_test =train_test_split(X,y,test_size =.3)


def add_layer(inputs,in_size,out_size,layer_name,activation_function =None,):
    Weights =tf.Variable(tf.random_normal([in_size,out_size]))
    biases =tf.Variable(tf.zeros([1,out_size]) +0.1)
    Wx_plus_b =tf.matmul(inputs,Weights) +biases
    Wx_plus_b =tf.nn.dropout(Wx_plus_b,keep_prob)
    if activation_function is None:
        outputs =Wx_plus_b
    else:
        outputs =activation_function(Wx_plus_b,)
    tf.summary.histogram(layer_name +'/outputs',outputs)
    return outputs

keep_prob =tf.placeholder(tf.float32)

xs =tf.placeholder(tf.float32,[None,64])
ys =tf.placeholder(tf.float32,[None,10])

l1 =add_layer(xs,64,50,'l1',activation_function =tf.nn.tanh)
prediction =add_layer(l1,50,10,'l2',activation_function =tf.nn.softmax)

cross_entropy =tf.reduce_mean(-tf.reduce_sum(ys *tf.log(prediction),reduction_indices =[1]))

tf.summary.scalar('loss',cross_entropy)

train_step =tf.train.GradientDescentOptimizer(0.6).minimize(cross_entropy)

sess =tf.Session()
merged =tf.summary.merge_all()

train_writer =tf.summary.FileWriter("logs/train",sess.graph)
test_writer =tf.summary.FileWriter("logs/test",sess.graph)

sess.run(tf.initialize_all_variables())

for i in range(1000):
    sess.run(train_step,feed_dict ={xs:X_train,ys:y_train,keep_prob:0.5})
    if i % 50 ==0:
        train_result =sess.run(merged,feed_dict ={xs:X_train,ys:y_train,keep_prob:1})
        test_result =sess.run(merged,feed_dict ={xs:X_test,ys:y_test,keep_prob:1})
        train_writer.add_summary(train_result,i)
        test_writer.add_summary(test_result,i)

D:\Test>python test.py
2018-11-26 17:41:24.523774: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.

D:\Test>tensorboard --logdir=logs
TensorBoard 1.12.0 at http://DESKTOP-AI0CBU4:6006 (Press CTRL+C to quit)

CNN (Convolutional Neural Network)

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist =input_data.read_data_sets('MNIST_data',one_hot =True)

def compute_accuracy(v_xs,v_ys):
    global prediction
    y_pre =sess.run(prediction,feed_dict ={xs:v_xs,keep_prob:1})
    correct_prediction =tf.equal(tf.argmax(y_pre,1),tf.argmax(v_ys,1))
    accuracy =tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    result =sess.run(accuracy,feed_dict ={xs:v_xs,ys:v_ys,keep_prob:1})
    return result

def weight_variable(shape):
    initial =tf.truncated_normal(shape,stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial =tf.constant(0.1,shape =shape)
    return tf.Variable(initial)

def conv2d(x,W):
    return tf.nn.conv2d(x,W,strides =[1,1,1,1],padding ='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x,ksize =[1,2,2,1],strides =[1,2,2,1],padding ='SAME')

xs =tf.placeholder(tf.float32,[None,784])
ys =tf.placeholder(tf.float32,[None,10])
keep_prob =tf.placeholder(tf.float32)
x_image =tf.reshape(xs,[-1,28,28,1])
print(x_image.shape)

W_conv1 =weight_variable([5,5,1,32])
#patch 5x5,in size 1,out size 32
b_conv1 =bias_variable([32])
h_conv1 =tf.nn.relu(conv2d(x_image,W_conv1) +b_conv1)
h_pool1 =max_pool_2x2(h_conv1)

W_conv2 =weight_variable([5,5,32,64])
b_conv2 =bias_variable([64])
h_conv2 =tf.nn.relu(conv2d(h_pool1,W_conv2) +b_conv2)
h_pool2 =max_pool_2x2(h_conv2)

W_fc1 =weight_variable([7*7*64,1024])
b_fc1 =bias_variable([1024])
h_pool2_flat =tf.reshape(h_pool2,[-1,7*7*64])
h_fc1 =tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1) +b_fc1)
h_fc1_drop =tf.nn.dropout(h_fc1,keep_prob)

W_fc2 =weight_variable([1024,10])
b_fc2 =bias_variable([10])
prediction =tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2) +b_fc2)

cross_entropy =tf.reduce_mean(-tf.reduce_sum(ys *tf.log(prediction),reduction_indices =[1]))
train_step =tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

sess =tf.Session()
sess.run(tf.initialize_all_variables())

for i in range(1000):
    batch_xs,batch_ys =mnist.train.next_batch(100)
    sess.run(train_step,feed_dict ={xs:batch_xs,ys:batch_ys,keep_prob:0.5})
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images,mnist.test.labels))

A fully commented version of the same network, from the original tutorial source:

# View more python tutorial on my Youtube and Youku channel!!!

# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial

"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# number 1 to 10 data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    # stride [1, x_movement, y_movement, 1]
    # Must have strides[0] = strides[3] = 1
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # stride [1, x_movement, y_movement, 1]
    return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 784])/255.   # 28x28
ys = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)
x_image = tf.reshape(xs, [-1, 28, 28, 1])
# print(x_image.shape)  # [n_samples, 28,28,1]

## conv1 layer ##
W_conv1 = weight_variable([5,5, 1,32]) # patch 5x5, in size 1, out size 32
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # output size 28x28x32
h_pool1 = max_pool_2x2(h_conv1)                                         # output size 14x14x32

## conv2 layer ##
W_conv2 = weight_variable([5,5, 32, 64]) # patch 5x5, in size 32, out size 64
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # output size 14x14x64
h_pool2 = max_pool_2x2(h_conv2)                                         # output size 7x7x64

## fc1 layer ##
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
# [n_samples, 7, 7, 64] ->> [n_samples, 7*7*64]
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

## fc2 layer ##
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)


# the error between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))       # loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

sess = tf.Session()
# important step
# tf.initialize_all_variables() is no longer valid from
# 2017-03-02 if using tensorflow >= 0.12
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
    init = tf.initialize_all_variables()
else:
    init = tf.global_variables_initializer()
sess.run(init)

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images[:1000], mnist.test.labels[:1000]))


D:\Test>python test.py
WARNING:tensorflow:From test.py:13: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data\train-images-idx3-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data\train-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Extracting MNIST_data\t10k-images-idx3-ubyte.gz
Extracting MNIST_data\t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
2018-11-26 19:26:35.601453: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
0.138
0.739
0.845
0.882
0.899
0.917
0.912
0.928
0.931
0.945
0.941
0.946
0.947
0.946
0.951
0.954
0.957
0.961
0.957
0.964

Saver: saving and restoring

import tensorflow as tf

W =tf.Variable([[1,2,3],[3,4,5]],dtype =tf.float32,name ='weights')
b =tf.Variable([[1,2,3]],dtype =tf.float32,name ='biases')

init =tf.initialize_all_variables()

saver =tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    save_path =saver.save(sess,"my_net/save_net.ckpt")
    print("Save to path:",save_path)

D:\Test>python test.py
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
2018-11-26 19:57:51.067517: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
Save to path: my_net/save_net.ckpt

Restoring the saved variables (the shapes, dtypes and names must match what was saved):

from __future__ import print_function
import tensorflow as tf
import numpy as np

W =tf.Variable(np.arange(6).reshape((2,3)),dtype =tf.float32,name ="weights")
b =tf.Variable(np.arange(3).reshape((1,3)),dtype =tf.float32,name ="biases")

saver =tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess,"my_net/save_net.ckpt")
    print("weights:",sess.run(W))
    print("biases:",sess.run(b))

D:\Test>python test.py
2018-11-26 20:57:11.627128: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
weights: [[1. 2. 3.]
 [3. 4. 5.]]
biases: [[1. 2. 3.]]

RNN (deep learning)


LSTM

Long Short-Term Memory
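
Before the full MNIST example below, a minimal sketch (my own example; the sizes are arbitrary) of the two calls it is built around, an LSTM cell unrolled by dynamic_rnn:

import tensorflow as tf
import numpy as np

batch_size, n_steps, n_inputs, n_hidden = 4, 5, 3, 8

x = tf.placeholder(tf.float32, [None, n_steps, n_inputs])

cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
init_state = cell.zero_state(batch_size, dtype=tf.float32)
# outputs: hidden state at every time step; final_state: (c, h) after the last step
outputs, final_state = tf.nn.dynamic_rnn(cell, x, initial_state=init_state, time_major=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out, (c, h) = sess.run([outputs, final_state],
                           feed_dict={x: np.random.rand(batch_size, n_steps, n_inputs)})
    print(out.shape)   # (4, 5, 8)
    print(h.shape)     # (4, 8)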

RNN LSTM recurrent neural network (classification)

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist =input_data.read_data_sets('MNIST_data',one_hot =True)

lr =0.001
training_iters =100000
batch_size =128

n_inputs =28
n_steps =28
n_hidden_units =128
n_classes =10

x =tf.placeholder(tf.float32,[None,n_steps,n_inputs])
y =tf.placeholder(tf.float32,[None,n_classes])

weights ={
    'in':tf.Variable(tf.random_normal([n_inputs,n_hidden_units])),
    'out':tf.Variable(tf.random_normal([n_hidden_units,n_classes]))
}
biases ={
    'in':tf.Variable(tf.constant(0.1,shape =[n_hidden_units,])),
    'out':tf.Variable(tf.constant(0.1,shape =[n_classes]))
}

def RNN(X,weights,biases):
    X =tf.reshape(X,[-1,n_inputs])
    X_in =tf.matmul(X,weights['in']) +biases['in']
    X_in =tf.reshape(X_in,[-1,n_steps,n_hidden_units])

    lstm_cell =tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units,forget_bias =1.0,state_is_tuple =True)
    _init_state =lstm_cell.zero_state(batch_size,dtype =tf.float32)
    outputs,states =tf.nn.dynamic_rnn(lstm_cell,X_in,initial_state =_init_state,time_major =False)


    # Method 1: use the cell's final hidden state
    results =tf.matmul(states[1],weights['out']) +biases['out']
    # Method 2: use the output at the last time step
    #outputs =tf.unstack(tf.transpose(outputs,[1,0,2]))
    #results =tf.matmul(outputs[-1],weights['out']) +biases['out']
    return results

pred =RNN(x,weights,biases)
cost =tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits =pred,labels =y))
train_op =tf.train.AdamOptimizer(lr).minimize(cost)

correct_pred =tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
accuracy =tf.reduce_mean(tf.cast(correct_pred,tf.float32))

init =tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    step =0
    while step *batch_size < training_iters:
        batch_xs,batch_ys =mnist.train.next_batch(batch_size)
        batch_xs =batch_xs.reshape([batch_size,n_steps,n_inputs])
        sess.run([train_op],feed_dict ={
            x:batch_xs,
            y:batch_ys,
            })
        if step % 20 ==0:
            print(sess.run(accuracy,feed_dict ={
                x:batch_xs,
                y:batch_ys
                }))
        step +=1

D:\Test>python test.py
WARNING:tensorflow:From test.py:4: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data\train-images-idx3-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data\train-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Extracting MNIST_data\t10k-images-idx3-ubyte.gz
Extracting MNIST_data\t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From test.py:32: BasicLSTMCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.
Instructions for updating:
This class is deprecated, please use tf.nn.rnn_cell.LSTMCell, which supports all the feature this cell currently has. Please replace the existing code with tf.nn.rnn_cell.LSTMCell(name='basic_lstm_cell').
WARNING:tensorflow:From test.py:45: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.
Instructions for updating:

Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.

See `tf.nn.softmax_cross_entropy_with_logits_v2`.

WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
2018-11-26 22:51:21.185136: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
0.2265625
0.734375
0.7734375
0.796875
0.8828125
0.8828125
0.875
0.890625
0.90625
0.890625
0.921875

The same network again, this time computing the result from the output at the last time step (Method 2) instead of the final hidden state:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist =input_data.read_data_sets('MNIST_data',one_hot =True)

lr =0.001
training_iters =100000
batch_size =128

n_inputs =28
n_steps =28
n_hidden_units =128
n_classes =10

x =tf.placeholder(tf.float32,[None,n_steps,n_inputs])
y =tf.placeholder(tf.float32,[None,n_classes])

weights ={
    'in':tf.Variable(tf.random_normal([n_inputs,n_hidden_units])),
    'out':tf.Variable(tf.random_normal([n_hidden_units,n_classes]))
}
biases ={
    'in':tf.Variable(tf.constant(0.1,shape =[n_hidden_units,])),
    'out':tf.Variable(tf.constant(0.1,shape =[n_classes]))
}

def RNN(X,weights,biases):
    X =tf.reshape(X,[-1,n_inputs])
    X_in =tf.matmul(X,weights['in']) +biases['in']
    X_in =tf.reshape(X_in,[-1,n_steps,n_hidden_units])

    lstm_cell =tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units,forget_bias =1.0,state_is_tuple =True)
    _init_state =lstm_cell.zero_state(batch_size,dtype =tf.float32)
    outputs,states =tf.nn.dynamic_rnn(lstm_cell,X_in,initial_state =_init_state,time_major =False)


    # Method 1: use the cell's final hidden state
    #results =tf.matmul(states[1],weights['out']) +biases['out']
    # Method 2: use the output at the last time step
    outputs =tf.unstack(tf.transpose(outputs,[1,0,2]))
    results =tf.matmul(outputs[-1],weights['out']) +biases['out']
    return results

pred =RNN(x,weights,biases)
cost =tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits =pred,labels =y))
train_op =tf.train.AdamOptimizer(lr).minimize(cost)

correct_pred =tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
accuracy =tf.reduce_mean(tf.cast(correct_pred,tf.float32))

init =tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    step =0
    while step *batch_size < training_iters:
        batch_xs,batch_ys =mnist.train.next_batch(batch_size)
        batch_xs =batch_xs.reshape([batch_size,n_steps,n_inputs])
        sess.run([train_op],feed_dict ={
            x:batch_xs,
            y:batch_ys,
            })
        if step % 20 ==0:
            print(sess.run(accuracy,feed_dict ={
                x:batch_xs,
                y:batch_ys
                }))
        step +=1


D:\Test>python test.py
WARNING:tensorflow:From test.py:4: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data\train-images-idx3-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data\train-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Extracting MNIST_data\t10k-images-idx3-ubyte.gz
Extracting MNIST_data\t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From test.py:32: BasicLSTMCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.
Instructions for updating:
This class is deprecated, please use tf.nn.rnn_cell.LSTMCell, which supports all the feature this cell currently has. Please replace the existing code with tf.nn.rnn_cell.LSTMCell(name='basic_lstm_cell').
WARNING:tensorflow:From test.py:45: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.
Instructions for updating:

Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.

See `tf.nn.softmax_cross_entropy_with_logits_v2`.

Traceback (most recent call last):
  File "test.py", line 45, in <module>
    cost =tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred,y))
  File "D:\Python\Python36\lib\site-packages\tensorflow\python\util\deprecation.py", line 306, in new_func
    return func(*args, **kwargs)
  File "D:\Python\Python36\lib\site-packages\tensorflow\python\ops\nn_ops.py", line 1947, in softmax_cross_entropy_with_logits
    logits)
  File "D:\Python\Python36\lib\site-packages\tensorflow\python\ops\nn_ops.py", line 1759, in _ensure_xent_args
    "named arguments (labels=..., logits=..., ...)" % name)
ValueError: Only call `softmax_cross_entropy_with_logits` with named arguments (labels=..., logits=..., ...)

After switching the loss call to named arguments (logits =pred, labels =y), as in the listing above, the script runs:

D:\Test>python test.py
WARNING:tensorflow:From test.py:4: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data\train-images-idx3-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data\train-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Extracting MNIST_data\t10k-images-idx3-ubyte.gz
Extracting MNIST_data\t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From test.py:32: BasicLSTMCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.
Instructions for updating:
This class is deprecated, please use tf.nn.rnn_cell.LSTMCell, which supports all the feature this cell currently has. Please replace the existing code with tf.nn.rnn_cell.LSTMCell(name='basic_lstm_cell').
WARNING:tensorflow:From test.py:45: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.
Instructions for updating:

Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.

See `tf.nn.softmax_cross_entropy_with_logits_v2`.

WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
2018-11-26 22:49:15.082806: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
0.15625
0.5703125
0.71875
0.875
0.84375
0.890625
0.8203125
0.8984375
0.9140625
0.90625
0.9140625
0.890625
0.921875
0.9375
0.90625
0.953125

RNN LSTM (regression)

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

BATCH_START =0
TIME_STEPS =20
BATCH_SIZE =50
INPUT_SIZE =1
OUTPUT_SIZE =1
CELL_SIZE =10
LR =0.006

def get_batch():
    global BATCH_START,TIME_STEPS
    xs =np.arange(BATCH_START,BATCH_START +TIME_STEPS *BATCH_SIZE).reshape((BATCH_SIZE,TIME_STEPS)) /(10 *np.pi)
    seq =np.sin(xs)
    res =np.cos(xs)
    BATCH_START +=TIME_STEPS
    #plt.plot(xs[0,:],res[0,:],'r',xs[0,:],seq[0,:],'b--')
    #plt.show()
    return [seq[:,:,np.newaxis],res[:,:,np.newaxis],xs]

class LSTMRNN(object):
    def __init__(self,n_steps,input_size,output_size,cell_size,batch_size):
        self.n_steps =n_steps
        self.input_size =input_size
        self.output_size =output_size
        self.cell_size =cell_size
        self.batch_size =batch_size
        with tf.name_scope('inputs'):
            self.xs =tf.placeholder(tf.float32,[None,n_steps,input_size],name ='xs')
            self.ys =tf.placeholder(tf.float32,[None,n_steps,output_size],name ='ys')
        with tf.variable_scope('in_hidden'):
            self.add_input_layer()
        with tf.variable_scope('LSTM_cell'):
            self.add_cell()
        with tf.variable_scope('out_hidden'):
            self.add_output_layer()
        with tf.name_scope('cost'):
            self.compute_cost()
        with tf.name_scope('train'):
            self.train_op =tf.train.AdamOptimizer(LR).minimize(self.cost)

    def add_input_layer(self,):
        l_in_x =tf.reshape(self.xs,[-1,self.input_size],name ='2_2D')
        Ws_in =self._weight_variable([self.input_size,self.cell_size])
        bs_in =self._bias_variable([self.cell_size,])

        with tf.name_scope('Wx_plus_b'):
            l_in_y =tf.matmul(l_in_x,Ws_in) +bs_in
        self.l_in_y =tf.reshape(l_in_y,[-1,self.n_steps,self.cell_size],name ='2_3D')

    def add_cell(self):
        lstm_cell =tf.nn.rnn_cell.BasicLSTMCell(self.cell_size,forget_bias =1.0,state_is_tuple =True)
        with tf.name_scope('initial_state'):
            self.cell_init_state =lstm_cell.zero_state(self.batch_size,dtype =tf.float32)
        self.cell_outputs,self.cell_final_state =tf.nn.dynamic_rnn(
            lstm_cell,self.l_in_y,initial_state =self.cell_init_state,time_major =False)

    def add_output_layer(self):
        l_out_x =tf.reshape(self.cell_outputs,[-1,self.cell_size],name ='2_2D')
        Ws_out =self._weight_variable([self.cell_size,self.output_size])
        bs_out =self._bias_variable([self.output_size,])
        with tf.name_scope('Wx_plus_b'):
            self.pred =tf.matmul(l_out_x,Ws_out) +bs_out

    def compute_cost(self):
        losses =tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [tf.reshape(self.pred,[-1],name ='reshape_pred')],
            [tf.reshape(self.ys,[-1],name ='reshape_target')],
            [tf.ones([self.batch_size *self.n_steps],dtype =tf.float32)],
            average_across_timesteps =True,
            softmax_loss_function =self.ms_error,
            name  ='losses'
            )
        with tf.name_scope('average_cost'):
            self.cost =tf.div(
                tf.reduce_sum(losses,name ='losses_sum'),
                tf.cast(self.batch_size,tf.float32),
                name ='average_cost')
            tf.summary.scalar('cost',self.cost)

    @staticmethod
    def ms_error(labels,logits):
        return tf.square(tf.subtract(labels,logits))

    def _weight_variable(self,shape,name ='weights'):
        initializer =tf.random_normal_initializer(mean =0.,stddev =1.,)
        return tf.get_variable(shape =shape,initializer =initializer,name =name)

    def _bias_variable(self,shape,name ='biases'):
        initializer =tf.constant_initializer(0.1)
        return tf.get_variable(name =name,shape =shape,initializer =initializer)

if __name__ =='__main__':
    model =LSTMRNN(TIME_STEPS,INPUT_SIZE,OUTPUT_SIZE,CELL_SIZE,BATCH_SIZE)
    sess =tf.Session()
    merged =tf.summary.merge_all()
    writer =tf.summary.FileWriter("logs",sess.graph)

    if int((tf.__version__).split('.')[1]) <12 and int((tf.__version__).split('.')[0]) <1:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    sess.run(init)

    plt.ion()
    plt.show()
    for i in range(200):
        seq,res,xs =get_batch()
        if i ==0:
            feed_dict ={
            model.xs:seq,
            model.ys:res,
            }
        else:
            feed_dict ={
            model.xs:seq,
            model.ys:res,
            model.cell_init_state:state
            }
        _,cost,state,pred =sess.run(
            [model.train_op,model.cost,model.cell_final_state,model.pred],
            feed_dict =feed_dict)

        plt.plot(xs[0,:],res[0].flatten(),'r',xs[0,:],pred.flatten()[:TIME_STEPS],'b--')
        plt.ylim((-1.2,1.2))
        plt.draw()
        plt.pause(0.3)

        if i % 20 ==0:
            print('cost:',round(cost,4))
            result =sess.run(merged,feed_dict)
            writer.add_summary(result,i)

D:\Test>python test.py
WARNING:tensorflow:From test.py:54: BasicLSTMCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.
Instructions for updating:
This class is deprecated, please use tf.nn.rnn_cell.LSTMCell, which supports all the feature this cell currently has. Please replace the existing code with tf.nn.rnn_cell.LSTMCell(name='basic_lstm_cell').
2018-11-28 14:29:17.172952: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
cost: 25.8942
cost: 7.4876
cost: 1.0346
cost: 0.9841
cost: 0.6809
cost: 0.2195
cost: 0.4931
cost: 0.5682
cost: 0.3645
cost: 0.1143

Autoencoder (deep learning)

from __future__ import division, print_function, absolute_import
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist =input_data.read_data_sets('/tmp/data/',one_hot =False)

learning_rate =0.01
training_epochs =5
batch_size =256
display_step =1
examples_to_show =10

n_input =784

X =tf.placeholder('float',[None,n_input])

n_hidden_1 =256
n_hidden_2 =128

weights ={
    'encoder_h1':tf.Variable(tf.random_normal([n_input,n_hidden_1])),
    'encoder_h2':tf.Variable(tf.random_normal([n_hidden_1,n_hidden_2])),

    'decoder_h1':tf.Variable(tf.random_normal([n_hidden_2,n_hidden_1])),
    'decoder_h2':tf.Variable(tf.random_normal([n_hidden_1,n_input])),
}
biases ={
    'encoder_b1':tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2':tf.Variable(tf.random_normal([n_hidden_2])),

    'decoder_b1':tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b2':tf.Variable(tf.random_normal([n_input])),
}

def encoder(x):
    layer_1 =tf.nn.sigmoid(tf.add(tf.matmul(x,weights['encoder_h1']),
        biases['encoder_b1']))
    layer_2 =tf.nn.sigmoid(tf.add(tf.matmul(layer_1,weights['encoder_h2']),
        biases['encoder_b2']))
    return layer_2

def decoder(x):
    layer_1 =tf.nn.sigmoid(tf.add(tf.matmul(x,weights['decoder_h1']),
        biases['decoder_b1']))
    layer_2 =tf.nn.sigmoid(tf.add(tf.matmul(layer_1,weights['decoder_h2']),
        biases['decoder_b2']))
    return layer_2

encoder_op =encoder(X)
decoder_op =decoder(encoder_op)

y_pred =decoder_op
y_true =X

cost =tf.reduce_mean(tf.pow(y_true -y_pred,2))
optimizer =tf.train.AdamOptimizer(learning_rate).minimize(cost)

with tf.Session() as sess:
    if int((tf.__version__).split('.')[1]) <12 and int((tf.__version__).split('.')[0]) <1:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    sess.run(init)
    total_batch =int(mnist.train.num_examples /batch_size)

    for epoch in range(training_epochs):
        for i in range(total_batch):
            batch_xs,batch_ys =mnist.train.next_batch(batch_size)
            _,c =sess.run([optimizer,cost],feed_dict ={X:batch_xs})

        # report once per epoch, after the inner batch loop
        if epoch % display_step ==0:
            print('Epoch:','%04d' % (epoch +1),'cost=','{:.9f}'.format(c))
    print('Optimization Finished!')

    encode_decode =sess.run(
        y_pred,feed_dict ={X:mnist.test.images[:examples_to_show]})
    f,a =plt.subplots(2,10,figsize =(10,2))
    for i in range(examples_to_show):
        a[0][i].imshow(np.reshape(mnist.test.images[i],(28,28)))
        a[1][i].imshow(np.reshape(encode_decode[i],(28,28)))
    plt.show()

A deeper autoencoder that compresses each image down to 2 features, so the learned codes can be plotted in 2-D:

from __future__ import division, print_function, absolute_import
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist =input_data.read_data_sets('/tmp/data/',one_hot =False)

learning_rate =0.001
training_epochs =20
batch_size =256
display_step =1

n_input =784

X =tf.placeholder('float',[None,n_input])

n_hidden_1 =128
n_hidden_2 =64
n_hidden_3 =10
n_hidden_4 =2

weights ={
    'encoder_h1':tf.Variable(tf.random_normal([n_input,n_hidden_1])),
    'encoder_h2':tf.Variable(tf.random_normal([n_hidden_1,n_hidden_2])),
    'encoder_h3':tf.Variable(tf.random_normal([n_hidden_2,n_hidden_3])),
    'encoder_h4':tf.Variable(tf.random_normal([n_hidden_3,n_hidden_4])),

    'decoder_h1':tf.Variable(tf.random_normal([n_hidden_4,n_hidden_3])),
    'decoder_h2':tf.Variable(tf.random_normal([n_hidden_3,n_hidden_2])),
    'decoder_h3':tf.Variable(tf.random_normal([n_hidden_2,n_hidden_1])),
    'decoder_h4':tf.Variable(tf.random_normal([n_hidden_1,n_input])),
}
biases ={
    'encoder_b1':tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2':tf.Variable(tf.random_normal([n_hidden_2])),
    'encoder_b3':tf.Variable(tf.random_normal([n_hidden_3])),
    'encoder_b4':tf.Variable(tf.random_normal([n_hidden_4])),

    'decoder_b1':tf.Variable(tf.random_normal([n_hidden_3])),
    'decoder_b2':tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b3':tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b4':tf.Variable(tf.random_normal([n_input])),
}

def encoder(x):
    layer_1 =tf.nn.sigmoid(tf.add(tf.matmul(x,weights['encoder_h1']),
        biases['encoder_b1']))
    layer_2 =tf.nn.sigmoid(tf.add(tf.matmul(layer_1,weights['encoder_h2']),
        biases['encoder_b2']))
    layer_3 =tf.nn.sigmoid(tf.add(tf.matmul(layer_2,weights['encoder_h3']),
        biases['encoder_b3']))
    layer_4 =tf.add(tf.matmul(layer_3,weights['encoder_h4']),
        biases['encoder_b4'])
    return layer_4

def decoder(x):
    layer_1 =tf.nn.sigmoid(tf.add(tf.matmul(x,weights['decoder_h1']),
        biases['decoder_b1']))
    layer_2 =tf.nn.sigmoid(tf.add(tf.matmul(layer_1,weights['decoder_h2']),
        biases['decoder_b2']))
    layer_3 =tf.nn.sigmoid(tf.add(tf.matmul(layer_2,weights['decoder_h3']),
        biases['decoder_b3']))
    layer_4 =tf.nn.sigmoid(tf.add(tf.matmul(layer_3,weights['decoder_h4']),
        biases['decoder_b4']))
    return layer_4

encoder_op =encoder(X)
decoder_op =decoder(encoder_op)

y_pred =decoder_op
y_true =X

cost =tf.reduce_mean(tf.pow(y_true -y_pred,2))
optimizer =tf.train.AdamOptimizer(learning_rate).minimize(cost)

with tf.Session() as sess:
    if int((tf.__version__).split('.')[1]) <12 and int((tf.__version__).split('.')[0]) <1:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    sess.run(init)
    total_batch =int(mnist.train.num_examples /batch_size)

    for epoch in range(training_epochs):
        for i in range(total_batch):
            batch_xs,batch_ys =mnist.train.next_batch(batch_size)
            _,c =sess.run([optimizer,cost],feed_dict ={X:batch_xs})

        if epoch % display_step ==0:
            print('Epoch:','%04d' % (epoch +1),'cost=','{:.9f}'.format(c))
    print('Optimization Finished!')


    encoder_result =sess.run(encoder_op,feed_dict ={X:mnist.test.images})
    plt.scatter(encoder_result[:,0],encoder_result[:,1],c =mnist.test.labels)
    plt.colorbar()
    plt.show()

name_scope/variable_scope naming

The scripts below show that tf.get_variable ignores tf.name_scope but does respect tf.variable_scope, that duplicate tf.Variable names are made unique automatically (var2, var2_1, var2_2), and that reusing a name created with tf.get_variable requires scope.reuse_variables() (or reuse=tf.AUTO_REUSE).

from __future__ import print_function
import tensorflow as tf

tf.set_random_seed(1)

with tf.name_scope('a_name_socope'):
    initializer =tf.constant_initializer(value =1)
    var1 =tf.get_variable(name ='var1',shape =[1],dtype =tf.float32,initializer =initializer)
    var2 =tf.Variable(name ='var2',initial_value =[2],dtype =tf.float32)
    var21 =tf.Variable(name ='var2',initial_value =[2.1],dtype =tf.float32)
    var22 =tf.Variable(name ='var2',initial_value =[2.2],dtype =tf.float32)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    print(var1.name)
    print(sess.run(var1))
    print(var2.name)
    print(sess.run(var2))
    print(var21.name)
    print(sess.run(var21))
    print(var22.name)
    print(sess.run(var22))

D:\Test>python test.py
2018-11-28 21:54:12.199352: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
var1:0
[1.]
a_name_socope/var2:0
[2.]
a_name_socope/var2_1:0
[2.1]
a_name_socope/var2_2:0
[2.2]

The same experiment inside tf.variable_scope, where tf.get_variable does pick up the scope prefix:

from __future__ import print_function
import tensorflow as tf

tf.set_random_seed(1)

with tf.variable_scope('a_variable_scope') as scope:
    initializer =tf.constant_initializer(value =3)
    var3 =tf.get_variable(name ='var3',shape =[1],dtype =tf.float32,initializer =initializer)
    var4 =tf.Variable(name ='var4',initial_value =[4],dtype =tf.float32)
    var4_reuse =tf.Variable(name ='var4',initial_value =[4],dtype =tf.float32)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    print(var3.name)
    print(sess.run(var3))
    print(var4.name)
    print(sess.run(var4))
    print(var4_reuse.name)
    print(sess.run(var4_reuse))

D:\Test>python test.py
2018-11-28 21:59:34.699133: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
a_variable_scope/var3:0
[3.]
a_variable_scope/var4:0
[4.]
a_variable_scope/var4_1:0
[4.]

Calling tf.get_variable twice with the same name inside one variable_scope raises an error:

from __future__ import print_function
import tensorflow as tf

tf.set_random_seed(1)

with tf.variable_scope('a_variable_scope') as scope:
    initializer =tf.constant_initializer(value =3)
    var3 =tf.get_variable(name ='var3',shape =[1],dtype =tf.float32,initializer =initializer)
    var3_reuse =tf.get_variable(name ='var3')

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    print(var3.name)
    print(sess.run(var3))
    print(var3_reuse.name)
    print(sess.run(var3_reuse))

D:\Test>python test.py
Traceback (most recent call last):
  File "test.py", line 9, in <module>
    var3_reuse =tf.get_variable(name ='var3')
  File "D:\Python\Python36\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 1487, in get_variable
    aggregation=aggregation)
  File "D:\Python\Python36\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 1237, in get_variable
    aggregation=aggregation)
  File "D:\Python\Python36\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 540, in get_variable
    aggregation=aggregation)
  File "D:\Python\Python36\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 492, in _true_getter
    aggregation=aggregation)
  File "D:\Python\Python36\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 861, in _get_single_variable
    name, "".join(traceback.format_list(tb))))
ValueError: Variable a_variable_scope/var3 already exists, disallowed. Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope? Originally defined at:

  File "D:\Python\Python36\lib\site-packages\tensorflow\python\framework\ops.py", line 1770, in __init__
    self._traceback = tf_stack.extract_stack()
  File "D:\Python\Python36\lib\site-packages\tensorflow\python\framework\ops.py", line 3274, in create_op
    op_def=op_def)
  File "D:\Python\Python36\lib\site-packages\tensorflow\python\util\deprecation.py", line 488, in new_func
    return func(*args, **kwargs)




Calling scope.reuse_variables() before the second tf.get_variable makes the reuse explicit, and both names point at the same variable:

from __future__ import print_function
import tensorflow as tf

tf.set_random_seed(1)

with tf.variable_scope('a_variable_scope') as scope:
    initializer =tf.constant_initializer(value =3)
    var3 =tf.get_variable(name ='var3',shape =[1],dtype =tf.float32,initializer =initializer)
    scope.reuse_variables()
    var3_reuse =tf.get_variable(name ='var3')

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    print(var3.name)
    print(sess.run(var3))
    print(var3_reuse.name)
    print(sess.run(var3_reuse))

D:\Test>python test.py
2018-11-28 22:03:23.644243: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
WARNING:tensorflow:From D:\Python\Python36\lib\site-packages\tensorflow\python\util\tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
a_variable_scope/var3:0
[3.]
a_variable_scope/var3:0
[3.]
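
The error message from the previous run also mentions reuse=tf.AUTO_REUSE. As a minimal sketch (not part of the original post), the same sharing can be written without an explicit scope.reuse_variables() call: the scope then creates 'var3' on first use and reuses it afterwards.

from __future__ import print_function
import tensorflow as tf

def get_var3():
    # reuse=tf.AUTO_REUSE: create the variable the first time, reuse it afterwards
    with tf.variable_scope('a_variable_scope',reuse =tf.AUTO_REUSE):
        return tf.get_variable(name ='var3',shape =[1],dtype =tf.float32,
            initializer =tf.constant_initializer(value =3))

var3 =get_var3()
var3_reuse =get_var3()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(var3.name)        # a_variable_scope/var3:0
    print(var3_reuse.name)  # a_variable_scope/var3:0, the same variable
    print(sess.run(var3))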

This reuse mechanism is what lets a training RNN and a test RNN with a different time_steps share one set of weights:

from __future__ import print_function
import tensorflow as tf

tf.set_random_seed(1)

class TrainConfig:
    batch_size =20
    time_steps =20
    input_size =20
    output_size =2
    cell_size =11
    learning_rate =0.01

class TestConfig(TrainConfig):
    time_steps =1

class RNN(object):
    def __init__(self, config):
        self._batch_size = config.batch_size
        self._time_steps = config.time_steps
        self._input_size = config.input_size
        self._output_size = config.output_size
        self._cell_size = config.cell_size
        self._lr = config.learning_rate
        self._built_RNN()

    def _built_RNN(self):
        with tf.variable_scope('inputs'):
            self._xs = tf.placeholder(tf.float32, [self._batch_size, self._time_steps, self._input_size], name='xs')
            self._ys = tf.placeholder(tf.float32, [self._batch_size, self._time_steps, self._output_size], name='ys')
        with tf.name_scope('RNN'):
            with tf.variable_scope('input_layer'):
                l_in_x = tf.reshape(self._xs, [-1, self._input_size], name='2_2D')  # (batch*n_step, in_size)
                # Ws (in_size, cell_size)
                Wi = self._weight_variable([self._input_size, self._cell_size])
                print(Wi.name)
                # bs (cell_size, )
                bi = self._bias_variable([self._cell_size, ])
                # l_in_y = (batch * n_steps, cell_size)
                with tf.name_scope('Wx_plus_b'):
                    l_in_y = tf.matmul(l_in_x, Wi) + bi
                l_in_y = tf.reshape(l_in_y, [-1, self._time_steps, self._cell_size], name='2_3D')

            with tf.variable_scope('cell'):
                cell = tf.contrib.rnn.BasicLSTMCell(self._cell_size)
                with tf.name_scope('initial_state'):
                    self._cell_initial_state = cell.zero_state(self._batch_size, dtype=tf.float32)

                self.cell_outputs = []
                cell_state = self._cell_initial_state
                for t in range(self._time_steps):
                    if t > 0: tf.get_variable_scope().reuse_variables()
                    cell_output, cell_state = cell(l_in_y[:, t, :], cell_state)
                    self.cell_outputs.append(cell_output)
                self._cell_final_state = cell_state

            with tf.variable_scope('output_layer'):
                # cell_outputs_reshaped (BATCH*TIME_STEP, CELL_SIZE)
                cell_outputs_reshaped = tf.reshape(tf.concat(self.cell_outputs, 1), [-1, self._cell_size])
                Wo = self._weight_variable((self._cell_size, self._output_size))
                bo = self._bias_variable((self._output_size,))
                product = tf.matmul(cell_outputs_reshaped, Wo) + bo
                # _pred shape (batch*time_step, output_size)
                self._pred = tf.nn.relu(product)    # for displacement

        with tf.name_scope('cost'):
            _pred = tf.reshape(self._pred, [self._batch_size, self._time_steps, self._output_size])
            mse = self.ms_error(_pred, self._ys)
            mse_ave_across_batch = tf.reduce_mean(mse, 0)
            mse_sum_across_time = tf.reduce_sum(mse_ave_across_batch, 0)
            self._cost = mse_sum_across_time
            self._cost_ave_time = self._cost / self._time_steps

        with tf.variable_scope('train'):
            self._lr = tf.convert_to_tensor(self._lr)
            self.train_op = tf.train.AdamOptimizer(self._lr).minimize(self._cost)

    @staticmethod
    def ms_error(y_target, y_pre):
        return tf.square(tf.subtract(y_target, y_pre))

    @staticmethod
    def _weight_variable(shape, name='weights'):
        initializer = tf.random_normal_initializer(mean=0., stddev=0.5, )
        return tf.get_variable(shape=shape, initializer=initializer, name=name)

    @staticmethod
    def _bias_variable(shape, name='biases'):
        initializer = tf.constant_initializer(0.1)
        return tf.get_variable(name=name, shape=shape, initializer=initializer)


if __name__ =='__main__':
    train_config =TrainConfig()
    test_config =TestConfig()
    with tf.variable_scope('train_rnn'):
        train_rnn1 =RNN(train_config)
    with tf.variable_scope('test_rnn'):
        test_rnn1 =RNN(test_config)

    with tf.variable_scope('rnn') as scope:
        sess =tf.Session()
        train_rnn2 =RNN(train_config)
        scope.reuse_variables()
        test_rnn2 =RNN(test_config)
        
        if int((tf.__version__).split('.')[1]) <12 and int((tf.__version__).split('.')[0]) <1:
            init =tf.initialize_all_variables()
        else:
            init =tf.global_variables_initializer()
        sess.run(init)

D:\Test>python test.py
train_rnn/input_layer/weights:0
WARNING:tensorflow:From test.py:45: BasicLSTMCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.
Instructions for updating:
This class is deprecated, please use tf.nn.rnn_cell.LSTMCell, which supports all the feature this cell currently has. Please replace the existing code with tf.nn.rnn_cell.LSTMCell(name='basic_lstm_cell').
test_rnn/input_layer/weights:0
2018-11-28 22:27:28.802503: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
rnn/input_layer/weights:0
rnn/input_layer/weights:0
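
The last two printed names are identical: train_rnn2 and test_rnn2 resolve to the same weights inside the reused 'rnn' scope. As a hypothetical check (not part of the original script), a few lines appended at the end of the __main__ block could list every input-layer weight that actually exists in the graph:

        # hypothetical addition to the end of the __main__ block above:
        # 'train_rnn' and 'test_rnn' each own a copy of the weights, while the
        # two RNNs built under 'rnn' share a single one
        for v in tf.global_variables():
            if 'input_layer/weights' in v.name:
                print(v.name)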

Batch Normalization (deep learning)


"""
visit https://morvanzhou.github.io/tutorials/ for more!
Build two networks.
1. Without batch normalization
2. With batch normalization
Run tests on these two networks.
"""

# 23 Batch Normalization

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt


ACTIVATION = tf.nn.relu
N_LAYERS = 7
N_HIDDEN_UNITS = 30


def fix_seed(seed=1):
    # reproducible
    np.random.seed(seed)
    tf.set_random_seed(seed)


def plot_his(inputs, inputs_norm):
    # plot histogram for the inputs of every layer
    for j, all_inputs in enumerate([inputs, inputs_norm]):
        for i, input in enumerate(all_inputs):
            plt.subplot(2, len(all_inputs), j*len(all_inputs)+(i+1))
            plt.cla()
            if i == 0:
                the_range = (-7, 10)
            else:
                the_range = (-1, 1)
            plt.hist(input.ravel(), bins=15, range=the_range, color='#FF5733')
            plt.yticks(())
            if j == 1:
                plt.xticks(the_range)
            else:
                plt.xticks(())
            ax = plt.gca()
            ax.spines['right'].set_color('none')
            ax.spines['top'].set_color('none')
        plt.title("%s normalizing" % ("Without" if j == 0 else "With"))
    plt.draw()
    plt.pause(0.01)


def built_net(xs, ys, norm):
    def add_layer(inputs, in_size, out_size, activation_function=None, norm=False):
        # weights and biases (bad initialization for this case)
        Weights = tf.Variable(tf.random_normal([in_size, out_size], mean=0., stddev=1.))
        biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)

        # fully connected product
        Wx_plus_b = tf.matmul(inputs, Weights) + biases

        # normalize fully connected product
        if norm:
            # Batch Normalize
            fc_mean, fc_var = tf.nn.moments(
                Wx_plus_b,
                axes=[0],   # the dimension you wanna normalize, here [0] for batch
                            # for image, you wanna do [0, 1, 2] for [batch, height, width] but not channel
            )
            scale = tf.Variable(tf.ones([out_size]))
            shift = tf.Variable(tf.zeros([out_size]))
            epsilon = 0.001

            # apply moving average for mean and var when train on batch
            ema = tf.train.ExponentialMovingAverage(decay=0.5)
            def mean_var_with_update():
                ema_apply_op = ema.apply([fc_mean, fc_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(fc_mean), tf.identity(fc_var)
            mean, var = mean_var_with_update()

            Wx_plus_b = tf.nn.batch_normalization(Wx_plus_b, mean, var, shift, scale, epsilon)
            # equivalent to these two steps:
            # Wx_plus_b = (Wx_plus_b - fc_mean) / tf.sqrt(fc_var + 0.001)
            # Wx_plus_b = Wx_plus_b * scale + shift

        # activation
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)

        return outputs

    fix_seed(1)

    if norm:
        # BN for the first input
        fc_mean, fc_var = tf.nn.moments(
            xs,
            axes=[0],
        )
        scale = tf.Variable(tf.ones([1]))
        shift = tf.Variable(tf.zeros([1]))
        epsilon = 0.001
        # apply moving average for mean and var when train on batch
        ema = tf.train.ExponentialMovingAverage(decay=0.5)
        def mean_var_with_update():
            ema_apply_op = ema.apply([fc_mean, fc_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(fc_mean), tf.identity(fc_var)
        mean, var = mean_var_with_update()
        xs = tf.nn.batch_normalization(xs, mean, var, shift, scale, epsilon)

    # record inputs for every layer
    layers_inputs = [xs]

    # build hidden layers
    for l_n in range(N_LAYERS):
        layer_input = layers_inputs[l_n]
        in_size = layers_inputs[l_n].get_shape()[1].value

        output = add_layer(
            layer_input,    # input
            in_size,        # input size
            N_HIDDEN_UNITS, # output size
            ACTIVATION,     # activation function
            norm,           # normalize before activation
        )
        layers_inputs.append(output)    # add output for next run

    # build output layer
    prediction = add_layer(layers_inputs[-1], 30, 1, activation_function=None)

    cost = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
    train_op = tf.train.GradientDescentOptimizer(0.001).minimize(cost)
    return [train_op, cost, layers_inputs]

# make up data
fix_seed(1)
x_data = np.linspace(-7, 10, 2500)[:, np.newaxis]
np.random.shuffle(x_data)
noise = np.random.normal(0, 8, x_data.shape)
y_data = np.square(x_data) - 5 + noise

# plot input data
plt.scatter(x_data, y_data)
plt.show()

xs = tf.placeholder(tf.float32, [None, 1])  # [num_samples, num_features]
ys = tf.placeholder(tf.float32, [None, 1])

train_op, cost, layers_inputs = built_net(xs, ys, norm=False)   # without BN
train_op_norm, cost_norm, layers_inputs_norm = built_net(xs, ys, norm=True) # with BN

sess = tf.Session()
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
    init = tf.initialize_all_variables()
else:
    init = tf.global_variables_initializer()
sess.run(init)

# record cost
cost_his = []
cost_his_norm = []
record_step = 5

plt.ion()
plt.figure(figsize=(7, 3))
for i in range(250):
    if i % 50 == 0:
        # plot histogram
        all_inputs, all_inputs_norm = sess.run([layers_inputs, layers_inputs_norm], feed_dict={xs: x_data, ys: y_data})
        plot_his(all_inputs, all_inputs_norm)

    # train on batch
    sess.run([train_op, train_op_norm], feed_dict={xs: x_data[i*10:i*10+10], ys: y_data[i*10:i*10+10]})

    if i % record_step == 0:
        # record cost
        cost_his.append(sess.run(cost, feed_dict={xs: x_data, ys: y_data}))
        cost_his_norm.append(sess.run(cost_norm, feed_dict={xs: x_data, ys: y_data}))

plt.ioff()
plt.figure()
plt.plot(np.arange(len(cost_his))*record_step, np.array(cost_his), label='no BN')     # no norm
plt.plot(np.arange(len(cost_his))*record_step, np.array(cost_his_norm), label='BN')   # norm
plt.legend()
plt.show()
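
The add_layer function above normalizes by hand with tf.nn.moments, an ExponentialMovingAverage and tf.nn.batch_normalization. As a minimal alternative sketch (an assumption, not part of the original post), TF 1.x also provides tf.layers.batch_normalization, which maintains the moving mean/variance for you; its update ops just have to be attached to the train step:

import numpy as np
import tensorflow as tf

xs =tf.placeholder(tf.float32,[None,1])
ys =tf.placeholder(tf.float32,[None,1])
is_training =tf.placeholder(tf.bool)   # True while training, False at test time

h =tf.layers.dense(xs,30)
h =tf.layers.batch_normalization(h,training =is_training)  # BN before activation
h =tf.nn.relu(h)
prediction =tf.layers.dense(h,1)

cost =tf.reduce_mean(tf.square(ys -prediction))
update_ops =tf.get_collection(tf.GraphKeys.UPDATE_OPS)      # moving-average updates
with tf.control_dependencies(update_ops):
    train_op =tf.train.GradientDescentOptimizer(0.001).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    x =np.random.randn(32,1).astype(np.float32)
    y =np.square(x) -5
    _,c =sess.run([train_op,cost],feed_dict ={xs:x,ys:y,is_training:True})
    print('one training step with built-in BN, cost =',c)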

Visualizing gradient descent and tuning the fitted formula

Each step of GradientDescentOptimizer updates the parameters as a <- a -LR *d(mse)/da and b <- b -LR *d(mse)/db. The scripts below fit a linear target, a cubic target and a non-convex sin/cos target; for the last one the starting point decides whether the descent reaches the true parameters (a=1.2, b=2.5) or gets stuck in a local minimum.

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

LR =0.1
REAL_PARAMS =[1.2,2.5]
INIT_PARAMS =[[5,4],[5,1],[2,4.5]][2]

x =np.linspace(-1,1,200,dtype =np.float32)

y_fun =lambda a,b:a *x +b
tf_y_fun =lambda a,b:a *x +b

noise =np.random.randn(200) /10
y =y_fun(*REAL_PARAMS) +noise
plt.scatter(x,y)
plt.show()

After plotting the raw data, the full script fits a and b by gradient descent and records the path across the cost surface:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

LR =0.1
REAL_PARAMS =[1.2,2.5]
INIT_PARAMS =[[5,4],[5,1],[2,4.5]][2]

x =np.linspace(-1,1,200,dtype =np.float32)

y_fun =lambda a,b:a *x +b
tf_y_fun =lambda a,b:a *x +b

noise =np.random.randn(200) /10
y =y_fun(*REAL_PARAMS) +noise

a,b =[tf.Variable(initial_value =p,dtype =tf.float32) for p in INIT_PARAMS]
pred =tf_y_fun(a,b)
mse =tf.reduce_mean(tf.square(y -pred))
train_op =tf.train.GradientDescentOptimizer(LR).minimize(mse)

a_list,b_list,cost_list =[],[],[]
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for t in range(400):
        a_,b_,mse_ =sess.run([a,b,mse])
        a_list.append(a_);b_list.append(b_);cost_list.append(mse_)
        result,_ =sess.run([pred,train_op])

print('a=',a_,'b=',b_)
plt.figure(1)
plt.scatter(x,y,c ='b')
plt.plot(x,result,'r--',lw =2)
fig =plt.figure(2);ax =Axes3D(fig)
a3D,b3D =np.meshgrid(np.linspace(-2,7,30),np.linspace(-2,7,30))
cost3D =np.array([np.mean(np.square(y_fun(a_,b_) -y)) for a_,b_ in zip(a3D.flatten(),b3D.flatten())]).reshape(a3D.shape)
ax.plot_surface(a3D,b3D,cost3D,rstride =1,cstride =1,cmap=plt.get_cmap('rainbow'),alpha =0.5)
ax.scatter(a_list[0],b_list[0],zs =cost_list[0],s =300,c ='r')
ax.set_xlabel('a');ax.set_ylabel('b')
ax.plot(a_list,b_list,zs =cost_list,zdir ='z',c ='r',lw =3)
plt.show()

D:\Test>python test.py
2018-11-28 23:46:05.030447: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
a= 1.2127774 b= 2.5050323

The same procedure with a cubic target a *x **3 +b *x **2:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

LR =0.1
REAL_PARAMS =[1.2,2.5]
INIT_PARAMS =[[5,4],[5,1],[2,4.5]][2]

x =np.linspace(-1,1,200,dtype =np.float32)

y_fun =lambda a,b:a *x **3 +b *x **2
tf_y_fun =lambda a,b:a *x **3 +b *x **2

noise =np.random.randn(200) /10
y =y_fun(*REAL_PARAMS) +noise

a,b =[tf.Variable(initial_value =p,dtype =tf.float32) for p in INIT_PARAMS]
pred =tf_y_fun(a,b)
mse =tf.reduce_mean(tf.square(y -pred))
train_op =tf.train.GradientDescentOptimizer(LR).minimize(mse)

a_list,b_list,cost_list =[],[],[]
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for t in range(400):
        a_,b_,mse_ =sess.run([a,b,mse])
        a_list.append(a_);b_list.append(b_);cost_list.append(mse_)
        result,_ =sess.run([pred,train_op])

print('a=',a_,'b=',b_)
plt.figure(1)
plt.scatter(x,y,c ='b')
plt.plot(x,result,'r--',lw =2)
fig =plt.figure(2);ax =Axes3D(fig)
a3D,b3D =np.meshgrid(np.linspace(-2,7,30),np.linspace(-2,7,30))
cost3D =np.array([np.mean(np.square(y_fun(a_,b_) -y)) for a_,b_ in zip(a3D.flatten(),b3D.flatten())]).reshape(a3D.shape)
ax.plot_surface(a3D,b3D,cost3D,rstride =1,cstride =1,cmap=plt.get_cmap('rainbow'),alpha =0.5)
ax.scatter(a_list[0],b_list[0],zs =cost_list[0],s =300,c ='r')
ax.set_xlabel('a');ax.set_ylabel('b')
ax.plot(a_list,b_list,zs =cost_list,zdir ='z',c ='r',lw =3)
plt.show()

D:\Test>python test.py
2018-11-28 23:48:38.872926: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
a= 1.174886 b= 2.5069551

With the non-convex target sin(b *cos(a *x)) and the starting point INIT_PARAMS[1] =[5,1], gradient descent settles in a local minimum far from the real parameters:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

LR =0.1
REAL_PARAMS =[1.2,2.5]
INIT_PARAMS =[[5,4],[5,1],[2,4.5]][1]

x =np.linspace(-1,1,200,dtype =np.float32)

y_fun =lambda a,b:np.sin(b *np.cos(a *x))
tf_y_fun =lambda a,b:tf.sin(b *tf.cos(a *x))

noise =np.random.randn(200) /10
y =y_fun(*REAL_PARAMS) +noise

a,b =[tf.Variable(initial_value =p,dtype =tf.float32) for p in INIT_PARAMS]
pred =tf_y_fun(a,b)
mse =tf.reduce_mean(tf.square(y -pred))
train_op =tf.train.GradientDescentOptimizer(LR).minimize(mse)

a_list,b_list,cost_list =[],[],[]
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for t in range(400):
        a_,b_,mse_ =sess.run([a,b,mse])
        a_list.append(a_);b_list.append(b_);cost_list.append(mse_)
        result,_ =sess.run([pred,train_op])

print('a=',a_,'b=',b_)
plt.figure(1)
plt.scatter(x,y,c ='b')
plt.plot(x,result,'r--',lw =2)
fig =plt.figure(2);ax =Axes3D(fig)
a3D,b3D =np.meshgrid(np.linspace(-2,7,30),np.linspace(-2,7,30))
cost3D =np.array([np.mean(np.square(y_fun(a_,b_) -y)) for a_,b_ in zip(a3D.flatten(),b3D.flatten())]).reshape(a3D.shape)
ax.plot_surface(a3D,b3D,cost3D,rstride =1,cstride =1,cmap=plt.get_cmap('rainbow'),alpha =0.5)
ax.scatter(a_list[0],b_list[0],zs =cost_list[0],s =300,c ='r')
ax.set_xlabel('a');ax.set_ylabel('b')
ax.plot(a_list,b_list,zs =cost_list,zdir ='z',c ='r',lw =3)
plt.show()

D:\Test>python test.py
2018-11-28 23:52:32.588903: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
a= 4.5588837 b= -0.5484938

Starting instead from INIT_PARAMS[2] =[2,4.5], the same target converges close to the real parameters:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

LR =0.1
REAL_PARAMS =[1.2,2.5]
INIT_PARAMS =[[5,4],[5,1],[2,4.5]][2]

x =np.linspace(-1,1,200,dtype =np.float32)

y_fun =lambda a,b:np.sin(b *np.cos(a *x))
tf_y_fun =lambda a,b:tf.sin(b *tf.cos(a *x))

noise =np.random.randn(200) /10
y =y_fun(*REAL_PARAMS) +noise

a,b =[tf.Variable(initial_value =p,dtype =tf.float32) for p in INIT_PARAMS]
pred =tf_y_fun(a,b)
mse =tf.reduce_mean(tf.square(y -pred))
train_op =tf.train.GradientDescentOptimizer(LR).minimize(mse)

a_list,b_list,cost_list =[],[],[]
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for t in range(400):
        a_,b_,mse_ =sess.run([a,b,mse])
        a_list.append(a_);b_list.append(b_);cost_list.append(mse_)
        result,_ =sess.run([pred,train_op])

print('a=',a_,'b=',b_)
plt.figure(1)
plt.scatter(x,y,c ='b')
plt.plot(x,result,'r--',lw =2)
fig =plt.figure(2);ax =Axes3D(fig)
a3D,b3D =np.meshgrid(np.linspace(-2,7,30),np.linspace(-2,7,30))
cost3D =np.array([np.mean(np.square(y_fun(a_,b_) -y)) for a_,b_ in zip(a3D.flatten(),b3D.flatten())]).reshape(a3D.shape)
ax.plot_surface(a3D,b3D,cost3D,rstride =1,cstride =1,cmap=plt.get_cmap('rainbow'),alpha =0.5)
ax.scatter(a_list[0],b_list[0],zs =cost_list[0],s =300,c ='r')
ax.set_xlabel('a');ax.set_ylabel('b')
ax.plot(a_list,b_list,zs =cost_list,zdir ='z',c ='r',lw =3)
plt.show()

D:\Test>python test.py
2018-11-28 23:51:21.759442: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
a= 1.2121553 b= 2.5152488

transfer learning

Reposted from: https://my.oschina.net/hellopasswd/blog/2876188
