# 项目实训第二周工作总结---tensorflow学习笔记

这周我主要学习了一下tensorflow的用法，并做了几个实践的小项目

1.1 创建图，启动图

#!/usr/bin/env python2

# -*- coding: utf-8 -*-

"""

Created on Mon Apr 16 19:34:29 2018

@author: sduhao

"""

#创建图 启动图

import tensorflow as tf

# Build a tiny graph: multiply a 1x2 matrix by a 2x1 matrix.
m1 = tf.constant([[3, 3]])
m2 = tf.constant([[2], [3]])
product = tf.matmul(m1, m2)

# Launch the graph in a session and fetch the result.
with tf.Session() as sess:
    result = sess.run(product)
    print(result)

1.2变量

#!/usr/bin/env python2

# -*- coding: utf-8 -*-

"""

Created on Mon Apr 16 19:37:35 2018

@author: sduhao

"""

#tensorflow中的变量

import tensorflow as tf

x = tf.Variable([1, 2])
a = tf.constant([3, 3])
sub = tf.subtract(x, a)

# Variables must be initialized before they can be used in a session.
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    print(sess.run(sub))

1.3赋值操作

#!/usr/bin/env python2

# -*- coding: utf-8 -*-

"""

Created on Mon Apr 16 19:40:48 2018

@author: sduhao

"""

import tensorflow as tf

# A counter variable incremented via an assign op.
state = tf.Variable(0, name="counter")
# NOTE(review): `new_value` was referenced but its definition was dropped in
# the paste; restored the canonical increment op from the tutorial.
new_value = tf.add(state, 1)
update = tf.assign(state, new_value)  # assignment op

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    print(sess.run(state))
    for _ in range(5):
        sess.run(update)
        print(sess.run(state))

1.4 Fetch And Feed

#!/usr/bin/env python2

# -*- coding: utf-8 -*-

"""

Created on Mon Apr 16 19:44:14 2018

@author: sduhao

"""

#Fetch 同时运行多个运算符

import tensorflow as tf

input1 = tf.constant(2.0)
input2 = tf.constant(3.0)
input3 = tf.constant(5.0)

# NOTE(review): the op definitions and the sess.run call were dropped in the
# paste; restored from the canonical Fetch example (run several ops at once).
add = tf.add(input2, input3)
mul = tf.multiply(input1, add)

with tf.Session() as sess:
    res = sess.run([mul, add])
    print(res)

#!/usr/bin/env python2

# -*- coding: utf-8 -*-

"""

Created on Mon Apr 16 19:47:21 2018

@author: sduhao

"""

#Feed 创建占位符

import tensorflow as tf

# Feed: placeholders receive their values at run time.
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
mul = tf.multiply(input1, input2)

with tf.Session() as sess:
    # Feed data is passed in as a dict keyed by placeholder.
    res = sess.run(mul, feed_dict={input1: [8.], input2: [2.]})
    print(res)

1.5小例子：线性逼近

#!/usr/bin/env python2

# -*- coding: utf-8 -*-

"""

Created on Mon Apr 16 19:50:46 2018

@author: sduhao

"""

#tensorflow 简单示例

import tensorflow as tf
import numpy as np

x_data = np.random.rand(100)      # 100 random inputs
y_data = x_data * 0.1 + 0.2       # target line y = 0.1*x + 0.2

# Linear model to approximate the line above.
b = tf.Variable(1.1)
k = tf.Variable(0.5)
y = k * x_data + b

# Mean squared error.
loss = tf.reduce_mean(tf.square(y - y_data))
# NOTE(review): the optimizer definition was dropped in the paste; restored
# gradient descent with the tutorial's learning rate 0.2.
optimizer = tf.train.GradientDescentOptimizer(0.2)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            print(step, sess.run([k, b]))

2.1非线性回归

#!/usr/bin/env python2

# -*- coding: utf-8 -*-

"""

Created on Mon Apr 16 19:58:47 2018

@author: sduhao

"""

#非线性回归

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# 200 sample points: y = x^2 plus Gaussian noise.
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]  # as a column vector
noise = np.random.normal(0, 0.02, x_data.shape)      # noise with same shape
y_data = np.square(x_data) + noise

# Two placeholders; the row count is determined at run time.
x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])

# Hidden layer: 1 input neuron -> 10 hidden neurons, tanh activation.
Weights_L1 = tf.Variable(tf.random_normal([1, 10]))
biases_L1 = tf.Variable(tf.zeros([10]))
Wx_plus_b_L1 = tf.matmul(x, Weights_L1) + biases_L1
L1 = tf.nn.tanh(Wx_plus_b_L1)

# Output layer.
Weights_L2 = tf.Variable(tf.random_normal([10, 1]))
biases_L2 = tf.zeros([1])
Wx_plus_b_L2 = tf.matmul(L1, Weights_L2) + biases_L2
prediction = tf.nn.tanh(Wx_plus_b_L2)

# Mean squared error.
loss = tf.reduce_mean(tf.square(prediction - y))
# NOTE(review): the train op was dropped in the paste; restored gradient
# descent with the tutorial's learning rate 0.1.
train = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for _ in range(2000):
        sess.run(train, feed_dict={x: x_data, y: y_data})
    predict_value = sess.run(prediction, feed_dict={x: x_data})

    # Plot the samples and the fitted curve.
    plt.figure()
    plt.scatter(x_data, y_data)
    plt.plot(x_data, predict_value, 'r-', lw=5)
    plt.show()

2.2手写数字识别v1（准确率91%）

#!/usr/bin/env python2

# -*- coding: utf-8 -*-

"""

Created on Mon Apr 16 20:16:25 2018

@author: sduhao

"""

'''

MNIST手写数字识别

MNIST数据集：

60000个训练样本，10000个测试样本

'''

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

'''
MNIST handwritten-digit recognition.
Dataset: 60000 training samples, 10000 test samples.
'''

# NOTE(review): the data-loading line was dropped in the paste; restored.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

batch_size = 100
n_batch = mnist.train.num_examples // batch_size

# Placeholders; None corresponds to batch_size.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Single softmax layer.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(x, W) + b)

# Mean squared error.
loss = tf.reduce_mean(tf.square(y - prediction))
# NOTE(review): the train op was dropped in the paste; restored gradient
# descent with the tutorial's learning rate 0.2.
train = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

init = tf.global_variables_initializer()

# Accuracy: argmax finds the index of the largest component.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter" + str(epoch) + ",Testing Accuracy" + str(acc))

'''
Knobs that affect accuracy:
1. batch_size
2. network structure: add hidden layers (their count and width)
3. weight/bias initialization scheme
4. loss definition, e.g. cross-entropy
5. optimizer choice and learning-rate setting
6. number of training epochs
'''

3.1手写数字识别v2（准确率96%，演示dropout）

#!/usr/bin/env python2

# -*- coding: utf-8 -*-

"""

Created on Mon Apr 16 20:45:59 2018

@author: sduhao

"""

'''

'''

'''

'''

'''tt

tf.train.MomentumOptimizer

...

'''

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# NOTE(review): the data-loading line was dropped in the paste; restored.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

batch_size = 100
n_batch = mnist.train.num_examples // batch_size

# Placeholders; None corresponds to batch_size.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
# Dropout keep probability: fraction of hidden neurons active during training.
keep_prob = tf.placeholder(tf.float32)

# Three oversized hidden layers to demonstrate dropout against overfitting
# (the task does not actually need this many layers/neurons).
# Hidden layer 1: weights from a truncated normal, biases zeros + 0.1.
W1 = tf.Variable(tf.truncated_normal([784, 2000], stddev=0.1))
b1 = tf.Variable(tf.zeros([2000]) + 0.1)
L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
L1_drop = tf.nn.dropout(L1, keep_prob)

# Hidden layer 2.
W2 = tf.Variable(tf.truncated_normal([2000, 2000], stddev=0.1))
b2 = tf.Variable(tf.zeros([2000]) + 0.1)
L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)
L2_drop = tf.nn.dropout(L2, keep_prob)

# Hidden layer 3.
W3 = tf.Variable(tf.truncated_normal([2000, 1000], stddev=0.1))
b3 = tf.Variable(tf.zeros([1000]) + 0.1)
L3 = tf.nn.tanh(tf.matmul(L2_drop, W3) + b3)
L3_drop = tf.nn.dropout(L3, keep_prob)

# Output layer.
W4 = tf.Variable(tf.truncated_normal([1000, 10], stddev=0.1))
b4 = tf.Variable(tf.zeros([10]) + 0.1)
prediction = tf.nn.softmax(tf.matmul(L3_drop, W4) + b4)

# Cross-entropy (log-likelihood) cost.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
# NOTE(review): the train op was dropped in the paste; restored gradient
# descent with the tutorial's learning rate 0.2.
train = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

init = tf.global_variables_initializer()

# Accuracy: argmax finds the index of the largest component.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(31):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Train with 70% of the hidden neurons active.
            sess.run(train, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.7})
        # Evaluate with all neurons active (keep_prob = 1.0).
        train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images, y: mnist.train.labels, keep_prob: 1.0})
        test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
        print("Iter" + str(epoch) + ",Testing Accuracy" + str(test_acc) + ",Training Accuracy" + str(train_acc))

3.2手写数字识别v3（准确率98%）

#!/usr/bin/env python2

# -*- coding: utf-8 -*-

"""

Created on Mon Apr 16 21:14:29 2018

@author: sduhao

"""

'''

'''

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# NOTE(review): the data-loading line was dropped in the paste; restored.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

batch_size = 100
n_batch = mnist.train.num_examples // batch_size

# Placeholders; None corresponds to batch_size.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
# Dropout keep probability for the hidden layers.
keep_prob = tf.placeholder(tf.float32)
# Learning rate as a variable so it can decay as training progresses:
# large at first, then gradually smaller.
lr = tf.Variable(0.001, dtype=tf.float32)

# Hidden layer 1: weights from a truncated normal, biases zeros + 0.1.
W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))
b1 = tf.Variable(tf.zeros([500]) + 0.1)
L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
L1_drop = tf.nn.dropout(L1, keep_prob)

# Hidden layer 2.
W2 = tf.Variable(tf.truncated_normal([500, 300], stddev=0.1))
b2 = tf.Variable(tf.zeros([300]) + 0.1)
L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)
L2_drop = tf.nn.dropout(L2, keep_prob)

# Output layer.
W3 = tf.Variable(tf.truncated_normal([300, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]) + 0.1)
prediction = tf.nn.softmax(tf.matmul(L2_drop, W3) + b3)

# Cross-entropy (log-likelihood) cost.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
# NOTE(review): the train op was dropped in the paste; restored Adam driven
# by the decaying learning-rate variable above.
train = tf.train.AdamOptimizer(lr).minimize(loss)

init = tf.global_variables_initializer()

# Accuracy: argmax finds the index of the largest component.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(51):
        # Exponentially decay the learning rate each epoch.
        sess.run(tf.assign(lr, 0.001 * (0.95 ** epoch)))
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Dropout is not needed here; keep all neurons active.
            sess.run(train, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
        test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
        print("Iter" + str(epoch) + ",Testing Accuracy" + str(test_acc))

4.1 使用 tensorboard

#!/usr/bin/env python2

# -*- coding: utf-8 -*-

"""

Created on Tue Apr 17 18:05:20 2018

@author: sduhao

"""

#使用tensorboard可视化网络结构和张量的计算流动过程以及监控各指标的变化情况

#使用手写数字识别v1程序进行演示

#再次运行时重启一下Kernel 清除缓存

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# NOTE(review): the data-loading line was dropped in the paste; restored.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

batch_size = 100
n_batch = mnist.train.num_examples // batch_size


def variable_summaries(var):
    """Attach mean/stddev/max/min/histogram summaries to a tensor."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)          # mean of the variable
        tf.summary.scalar('mean', mean)     # monitor the mean
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)           # monitor the std-dev
        tf.summary.scalar('max', tf.reduce_max(var))  # monitor the max
        tf.summary.scalar('min', tf.reduce_min(var))  # monitor the min
        tf.summary.histogram('histogram', var)        # value histogram


# Name scope for the input layer.
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y = tf.placeholder(tf.float32, [None, 10], name='y-input')

# Output layer.
with tf.name_scope('layer'):
    with tf.name_scope('weight'):
        W = tf.Variable(tf.zeros([784, 10]), name='w')
        variable_summaries(W)  # track how the weights evolve
    with tf.name_scope('biase'):
        b = tf.Variable(tf.zeros([10]), name='b')
        variable_summaries(b)
    with tf.name_scope('Wx_plus_b'):
        wx_plus_b = tf.matmul(x, W) + b
    with tf.name_scope('softmax'):
        prediction = tf.nn.softmax(wx_plus_b)

with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
    # Only the loss value itself needs monitoring, not its distribution.
    tf.summary.scalar('loss', loss)

with tf.name_scope('train'):
    # NOTE(review): the train op was dropped in the paste; restored gradient
    # descent matching the v1 script this demo is based on.
    train = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

init = tf.global_variables_initializer()

with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

# Merge all summaries into one op.
merged = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(init)
    # Create ./logs/ and write the graph there for TensorBoard.
    writer = tf.summary.FileWriter('logs/', sess.graph)
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            summary, _ = sess.run([merged, train], feed_dict={x: batch_xs, y: batch_ys})
        # Record summaries and report accuracy once per epoch.
        # NOTE(review): the add_summary call was dropped in the paste; without
        # it the monitored scalars never reach TensorBoard.
        writer.add_summary(summary, epoch)
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter" + str(epoch) + ",Testing Accuracy:" + str(acc))

'''

tensorboard --logdir=图文件路径（绝对路径）

'''

'''

for i in range(2001):

batch_xs,batch_ys=mnist.train.next_batch(batch_size)

summary,_=sess.run([merged,train],feed_dict={x:batch_xs,y:batch_ys})

if i%500==0:

print sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})

'''

5.1 使用CNN实现手写数字识别（准确率达99%以上）

#!/usr/bin/env python2

# -*- coding: utf-8 -*-

"""

Created on Tue Apr 17 19:01:44 2018

@author: sduhao

"""

#使用CNN实现手写数字识别

#再次运行时重启一下kernel

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# NOTE(review): the data-loading line was dropped in the paste; restored.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

batch_size = 100
n_batch = mnist.train.num_examples // batch_size


def variable_summaries(var):
    """Attach mean/stddev/max/min/histogram summaries to a tensor."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)


def weight_variable(shape, name):
    """Weights initialized from a truncated normal (stddev 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)


def bias_variable(shape, name):
    """Biases initialized to the constant 0.1."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)


def conv2d(x, w):
    """2-D convolution layer.

    x: 4-D tensor [batch_size, height, width, channels].
    w: filter [filter_height, filter_width, in_channels, out_channels].
    strides[0] and strides[3] are fixed at 1; strides[1]/strides[2] are the
    x/y step. 'SAME' pads the border with zeros; 'VALID' does not pad.
    NOTE(review): the return statement was dropped in the paste; restored.
    """
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    """2x2 max pooling; ksize[0]/ksize[3] are fixed at 1, the middle two are
    the window size. NOTE(review): return statement restored (dropped)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


# Input layer.
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y = tf.placeholder(tf.float32, [None, 10], name='y-input')
    with tf.name_scope('x_image'):
        # Reshape the flat vectors back into 28x28x1 images.
        x_image = tf.reshape(x, [-1, 28, 28, 1], name='x_image')

# First convolutional layer: 28*28*1 -(conv1)-> 28*28*32 -(maxpool)-> 14*14*32
with tf.name_scope('Conv1'):
    with tf.name_scope('W_conv1'):
        W_conv1 = weight_variable([5, 5, 1, 32], name='W_conv1')
    with tf.name_scope('b_conv1'):
        b_conv1 = bias_variable([32], name='b_conv1')
    with tf.name_scope('conv2d_1'):  # convolution extracts features
        conv2d_1 = conv2d(x_image, W_conv1) + b_conv1
    with tf.name_scope('relu'):
        h_conv1 = tf.nn.relu(conv2d_1)
    with tf.name_scope('h_pool1'):   # pooling compresses features
        h_pool1 = max_pool_2x2(h_conv1)

# Second convolutional layer: 14*14*32 -(conv2)-> 14*14*64 -(maxpool)-> 7*7*64
with tf.name_scope('Conv2'):
    with tf.name_scope('W_conv2'):
        W_conv2 = weight_variable([5, 5, 32, 64], name='W_conv2')
    with tf.name_scope('b_conv2'):
        b_conv2 = bias_variable([64], name='b_conv2')
    with tf.name_scope('conv2d_2'):  # convolution extracts features
        conv2d_2 = conv2d(h_pool1, W_conv2) + b_conv2
    with tf.name_scope('relu'):
        h_conv2 = tf.nn.relu(conv2d_2)
    with tf.name_scope('h_pool2'):   # pooling compresses features
        h_pool2 = max_pool_2x2(h_conv2)

# First fully connected layer.
with tf.name_scope('fc1'):
    with tf.name_scope('W_fc1'):
        W_fc1 = weight_variable([7 * 7 * 64, 1024], name='W_fc1')
    with tf.name_scope('b_fc1'):
        b_fc1 = bias_variable([1024], name='b_fc1')
    with tf.name_scope('h_pool2_flat'):
        # Flatten the pooled feature maps (plane -> column vector).
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64], name='h_pool2_flat')
    with tf.name_scope('wx_plus_b1'):
        wx_plus_b1 = tf.matmul(h_pool2_flat, W_fc1) + b_fc1
    with tf.name_scope('relu'):
        h_fc1 = tf.nn.relu(wx_plus_b1)
    with tf.name_scope('keep_prob'):
        keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    with tf.name_scope('h_fc1_dropout'):
        h_fc1_dropout = tf.nn.dropout(h_fc1, keep_prob, name='h_fc1_dropout')

# Second fully connected layer (output).
with tf.name_scope('fc2'):
    with tf.name_scope('W_fc2'):
        W_fc2 = weight_variable([1024, 10], name='W_fc2')
    with tf.name_scope('b_fc2'):
        b_fc2 = bias_variable([10], name='b_fc2')
    with tf.name_scope('wx_plus_b2'):
        wx_plus_b2 = tf.matmul(h_fc1_dropout, W_fc2) + b_fc2
    with tf.name_scope('softmax'):
        prediction = tf.nn.softmax(wx_plus_b2)

# Cross-entropy cost.
with tf.name_scope('cross_entrogy'):
    cross_entrogy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
    tf.summary.scalar('cross_entrogy', cross_entrogy)

# Training.
with tf.name_scope('train'):
    # NOTE(review): the train op was dropped in the paste; restored the
    # canonical Adam optimizer with learning rate 1e-4.
    train = tf.train.AdamOptimizer(1e-4).minimize(cross_entrogy)

# Accuracy.
with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

merged = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_writer = tf.summary.FileWriter('logs/train', sess.graph)
    test_writer = tf.summary.FileWriter('logs/test', sess.graph)
    for i in range(12001):
        # One training step with dropout (50% of fc1 neurons active).
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(train, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.5})
        summary = sess.run(merged, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
        # NOTE(review): add_summary calls were dropped in the paste; restored
        # so the two FileWriters actually receive the summaries.
        train_writer.add_summary(summary, i)

        batch_xs, batch_ys = mnist.test.next_batch(batch_size)
        summary = sess.run(merged, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
        test_writer.add_summary(summary, i)

        if i % 100 == 0:
            test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
            train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images[:10000], y: mnist.train.labels[:10000], keep_prob: 1.0})
            print("Iter" + str(i) + ",Testing Accuracy:" + str(test_acc) + ",Training Accuracy" + str(train_acc))

6.1 使用LSTM实现手写数字识别

#!/usr/bin/env python2

# -*- coding: utf-8 -*-

"""

Created on Tue Apr 17 20:21:10 2018

@author: sduhao

"""

#使用LSTM实现手写数字识别

#RNN/LSTM适合处理文本、声音等序列化信息，也可用做图片的分类

#再次运行时 重启kernel

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# NOTE(review): the data-loading line was dropped in the paste; restored.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# Input images are 28x28: feed one image row (28 pixels) per time step,
# serializing each image into a sequence.
n_inputs = 28     # input-layer width: one image row per step
max_time = 28     # 28 time steps per image
lstm_size = 100   # hidden units
n_classes = 10    # 10 classes
batch_size = 50
n_batch = mnist.train.num_examples // batch_size

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Output-layer weights and biases.
weights = tf.Variable(tf.truncated_normal([lstm_size, n_classes], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[n_classes]))


def RNN(X, weights, biases):
    """Run an LSTM over the image rows and classify from the final state.

    final_state[0] is the cell state; final_state[1] is the hidden state
    (the final output of each hidden unit).
    """
    # inputs shape: [batch_size, max_time, n_inputs]
    inputs = tf.reshape(X, [-1, max_time, n_inputs])
    # Basic LSTM cell with lstm_size hidden units.
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)
    results = tf.nn.softmax(tf.matmul(final_state[1], weights) + biases)
    return results


# Classification result of the RNN.
prediction = RNN(x, weights, biases)

# Loss. NOTE(review): wrapped in reduce_mean (the pasted source kept a
# per-example vector) and restored the dropped Adam train op.
cross_entrogy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
train = tf.train.AdamOptimizer(1e-4).minimize(cross_entrogy)

correct_accuracy = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_accuracy, tf.float32))

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(6):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter" + str(epoch) + ",Testing Accuracy:" + str(acc))

7.1 保存训练好的模型的参数和结构

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 20 18:01:22 2018

@author: sduhao
"""

#保存训练好的模型的结构和参数
#使用手写数字V1版本演示
#再次运行重启kernal
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# NOTE(review): the data-loading line was dropped in the paste; restored.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

batch_size = 100
n_batch = mnist.train.num_examples // batch_size

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Single softmax layer (MNIST v1 model).
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(x, W) + b)

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
# NOTE(review): the train op was dropped in the paste; restored gradient
# descent matching the v1 script.
train = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

init = tf.global_variables_initializer()

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(11):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter" + str(epoch) + ",Testing Accuracy" + str(acc))
    # Save the trained model's structure and parameters under net/.
    saver.save(sess, 'net/my_net.ckpt')

7.2 测试时载入之前训练好的模型

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 20 18:22:20 2018

@author: sduhao
"""

#调用之前训练好的模型做测试
#使用手写数字V1版本演示
#再次运行重启kernal
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# NOTE(review): the data-loading line was dropped in the paste; restored.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

batch_size = 100
n_batch = mnist.train.num_examples // batch_size

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Same model definition as the saving script, so the checkpoint matches.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(x, W) + b)

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))

init = tf.global_variables_initializer()

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    # Load the previously trained model directly instead of retraining.
    saver.restore(sess, 'net/my_net.ckpt')
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))

7.3 下载GoogLeNet-V3并查看结构

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 20 18:30:15 2018

@author: sduhao
"""

import tensorflow as tf
import os
import tarfile
import requests

# Inception model download address.
# NOTE(review): the URL assignment was dropped in the paste; restored the
# canonical address used by the classify_image example.
inception_pretrain_model_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'

# Model storage directory; create it if it does not exist.
inception_pretrain_model_dir = "inception_model"
if not os.path.exists(inception_pretrain_model_dir):
    os.makedirs(inception_pretrain_model_dir)

# File name and full path.
filename = inception_pretrain_model_url.split('/')[-1]
filepath = os.path.join(inception_pretrain_model_dir, filename)

# Download the model if not already present.
if not os.path.exists(filepath):
    r = requests.get(inception_pretrain_model_url, stream=True)
    with open(filepath, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
print("finish: ", filename)

# Extract the archive.
tarfile.open(filepath, 'r:gz').extractall(inception_pretrain_model_dir)

# Directory for the graph-structure log.
log_dir = 'inception_log'
if not os.path.exists(log_dir):
    os.makedirs(log_dir)

inception_graph_def_file = os.path.join(inception_pretrain_model_dir, 'classify_image_graph_def.pb')

with tf.Session() as sess:
    with tf.gfile.FastGFile(inception_graph_def_file, 'rb') as f:
        graph_def = tf.GraphDef()
        # NOTE(review): the ParseFromString call was dropped in the paste;
        # without it an empty graph would be imported.
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
    # Write the graph structure so TensorBoard can display it.
    writer = tf.summary.FileWriter(log_dir, sess.graph)
    writer.close()

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 20 18:58:18 2018

@author: sduhao
"""

#使用inception-V3做各种图像识别

import tensorflow as tf
import os
import numpy as np
import re
from PIL import Image
import matplotlib.pyplot as plt


class NodeLookup(object):
    """Maps Inception-V3 class ids (1-1000) to human-readable names."""

    def __init__(self):
        # <class id, string uid> pairs for the 1000 classes.
        label_lookup_path = 'inception_model/imagenet_2012_challenge_label_map_proto.pbtxt'
        # <string uid, class description> pairs.
        uid_lookup_path = 'inception_model/imagenet_synset_to_human_label_map.txt'
        # NOTE(review): the pasted __init__ ended with a bare `return`, which
        # is invalid for a constructor; moved the mapping construction into
        # load() per the canonical example and stored the result here.
        self.node_lookup = self.load(label_lookup_path, uid_lookup_path)

    def load(self, label_lookup_path, uid_lookup_path):
        """Build the {class id: description} mapping from the two files."""
        # NOTE(review): the file-reading lines were dropped in the paste;
        # restored the tf.gfile reads from the canonical example.
        # Load the string-uid -> description file, line by line.
        proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
        uid_to_human = {}
        for line in proto_as_ascii_lines:
            line = line.strip('\n')                # drop the newline
            parsed_items = line.split('\t')        # split on tabs
            uid = parsed_items[0]                  # string uid
            human_string = parsed_items[1]         # class description
            uid_to_human[uid] = human_string

        # Load the class-id (1-1000) -> string-uid file.
        proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
        node_id_to_uid = {}
        for line in proto_as_ascii:
            if line.startswith('  target_class:'):
                # Class id in 1-1000.
                target_class = int(line.split(': ')[1])
            if line.startswith('  target_class_string:'):
                # String uid; strip the surrounding quotes.
                target_class_string = line.split(': ')[1]
                node_id_to_uid[target_class] = target_class_string[1:-2]

        # Compose the two mappings: class id -> description.
        node_id_to_name = {}
        for key, val in node_id_to_uid.items():
            node_id_to_name[key] = uid_to_human[val]
        return node_id_to_name

    def id_to_string(self, node_id):
        """Return the description for node_id, or '' if unknown."""
        if node_id not in self.node_lookup:
            return ''
        return self.node_lookup[node_id]


with tf.gfile.FastGFile('inception_model/classify_image_graph_def.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    # NOTE(review): the ParseFromString call was dropped in the paste; restored.
    graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')

with tf.Session() as sess:
    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
    # Walk the images directory and classify each file.
    for root, dirs, files in os.walk('images/'):
        for file in files:
            # Print the image path and name.
            image_path = os.path.join(root, file)
            # NOTE(review): the image read was dropped in the paste; restored
            # (expects .jpg data fed to the DecodeJpeg input).
            image_data = tf.gfile.FastGFile(image_path, 'rb').read()
            predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})
            predictions = np.squeeze(predictions)  # flatten to a 1-D vector
            print(image_path)

            # Display the image.
            img = Image.open(image_path)
            plt.imshow(img)
            plt.axis('off')
            plt.show()

            # Top-5 class ids, sorted from most to least probable.
            top_k = predictions.argsort()[-5:][::-1]
            node_lookup = NodeLookup()
            for node_id in top_k:
                # Class description and its confidence score.
                human_string = node_lookup.id_to_string(node_id)
                score = predictions[node_id]
                print('%s (score=%.5f)' % (human_string, score))
            print()

• 评论

• 上一篇
• 下一篇