TensorFlow可以将一个计算图划分成多个子图,然后并行地在多个CPU或者GPU上执行
创建一个计算图并在会话中执行
import tensorflow as tf
x = tf.Variable(3,name="x")  # graph variable with initial value 3
y = tf.Variable(4,name="y")  # graph variable with initial value 4
f = x*x*y + y +2  # builds graph nodes only; nothing is evaluated yet
下面的代码创建一个会话,初始化所有变量,然后求值,最后关闭整个会话
# Create a session, initialize the variables, evaluate f, then close the session.
sess = tf.Session()
sess.run(x.initializer)
sess.run(y.initializer)
result = sess.run(f)
print(result)  # print the evaluated value (print(f) would only show the Tensor object)
sess.close()
等价于
# Equivalent form: inside the `with` block this session is the default one,
# so initializer.run() and f.eval() use it implicitly and it closes automatically.
with tf.Session() as sess:
    x.initializer.run()
    y.initializer.run()
    result = f.eval()
还可以使用global_variables_initializer()函数来完成同样的动作.这个操作不会立刻初始化,只是
在图中创建了一个节点,这个节点会在会话执行时初始化所有变量。
init = tf.global_variables_initializer()  # adds an init node; variables not initialized yet
with tf.Session() as sess:
    init.run()  # initializes all variables when the session runs the node
    result = f.eval()
例子:
# Example: assigning values to basic computation units.
import tensorflow as tf
w = tf.Variable([[0.5, 1.0]])    # 1x2 row vector
x = tf.Variable([[2.0], [1.0]])  # 2x1 column vector (original was missing the outer brackets)
y = tf.matmul(w, x)              # create the w, x, y nodes
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init_op)            # run the initialization
    print(y.eval())              # .eval() is needed to print the numeric value
# Commonly used tensor-construction helpers.
### 'tensor' is [[1,2,3], [4,5,6]] ###
tf.zeros([3, 4], tf.int32)         # ==> [[0,0,0,0],[0,0,0,0],[0,0,0,0]]
tf.zeros_like(tensor)              # ==> [[0,0,0],[0,0,0]]
tf.ones([2, 3], tf.int32)          # ==> [[1,1,1],[1,1,1]]  (original note wrongly showed zeros)
tf.ones_like(tensor)               # ==> [[1,1,1],[1,1,1]]
tensor = tf.constant([1, 2, 3, 4, 5, 6, 7])   # ==> [1 2 3 4 5 6 7]
tensor = tf.constant(-1.0, shape=[2, 3])      # ==> [[-1. -1. -1.] [-1. -1. -1.]]
tf.linspace(10.0, 12.0, 3, name="linspace")   # ==> [10.0 11.0 12.0]
tf.range(3, 18, 3)                 # ==> [3,6,9,12,15]  (start=3, limit=18, delta=3)
norm = tf.random_normal([2, 3], mean=-1, stddev=4)  # normal dist: mean -1, stddev 4
c = tf.constant([[1, 2], [3, 4], [5, 6]])  # original was missing the outer brackets
shuff = tf.random_shuffle(c)       # shuffle along the first dimension
# Add 1 to a variable three times, starting from 0.
state = tf.Variable(0)
new_value = tf.add(state, tf.constant(1))
update = tf.assign(state, new_value)  # assign the new value back into state
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(state))
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))
# Saving variables with tf.train.Saver.
w = tf.Variable([[0.5, 1.0]])
x = tf.Variable([[2.0], [1.0]])
y = tf.matmul(w, x)
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init_op)
    # Save the variables to disk.
    save_path = saver.save(sess, "C://tensorflow//model//test")
    print("Model saved in file:", save_path)
# Converting a numpy array to a TensorFlow tensor.
import numpy as np
a = np.zeros((3, 3))
ta = tf.convert_to_tensor(a)
with tf.Session() as sess:
    print(sess.run(ta))
# Define empty placeholders first, then feed values at run time.
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1, input2)  # tf.mul was removed in TF 1.0; tf.multiply is the replacement
with tf.Session() as sess:
    print(sess.run([output], feed_dict={input1: [7.], input2: [2.]}))
# Linear regression: check that gradient descent recovers the data-generating parameters.
num_points = 1000  # 1000 random points around the line y = 0.1*x + 0.3 (fixed name: num_point)
vectors_set = []
for i in range(num_points):
    x1 = np.random.normal(0.0, 0.55)  # Gaussian: mean 0, stddev 0.55
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)
    vectors_set.append([x1, y1])
x_data = [v[0] for v in vectors_set]  # sample points
y_data = [v[1] for v in vectors_set]
plt.scatter(x_data, y_data, c='r')
plt.show()
# Build the model, fit W and b by gradient descent, then verify the parameters.
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name='W')  # 1-D, uniform in [-1.0, 1.0)
b = tf.Variable(tf.zeros([1]), name='b')  # fixed misplaced closing parenthesis
y = W * x_data + b
loss = tf.reduce_mean(tf.square(y - y_data), name='loss')  # mean squared error
optimizer = tf.train.GradientDescentOptimizer(0.5)  # gradient descent, learning rate 0.5
train = optimizer.minimize(loss, name='train')
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
print("W=", sess.run(W), "b=", sess.run(b), "loss=", sess.run(loss))
for step in range(20):
    sess.run(train)
    print("W=", sess.run(W), "b=", sess.run(b), "loss=", sess.run(loss))
# Softmax (logistic) regression on MNIST.
x = tf.placeholder("float", [None, 784])
y = tf.placeholder("float", [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
actv = tf.nn.softmax(tf.matmul(x, W) + b)
# Cross-entropy: multiplying by y keeps only the loss of the correct class.
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(actv), reduction_indices=1))
learning_rate = 0.01
optm = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
pred = tf.equal(tf.argmax(actv, 1), tf.argmax(y, 1))
accr = tf.reduce_mean(tf.cast(pred, "float"))  # correct -> 1, wrong -> 0 (original comment was inverted)
init = tf.global_variables_initializer()
training_epochs = 50  # iterate over the whole training set 50 times
batch_size = 100
display_step = 5
sess = tf.Session()
sess.run(init)
for epoch in range(training_epochs):
    avg_cost = 0
    num_batch = int(mnist.train.num_examples / batch_size)
    for i in range(num_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optm, feed_dict={x: batch_xs, y: batch_ys})  # fixed typo: batch_xz
        feeds = {x: batch_xs, y: batch_ys}
        avg_cost += sess.run(cost, feed_dict=feeds) / num_batch
    # DISPLAY
    if epoch % display_step == 0:
        feeds_train = {x: batch_xs, y: batch_ys}
        feeds_test = {x: mnist.test.images, y: mnist.test.labels}
        train_acc = sess.run(accr, feed_dict=feeds_train)
        test_acc = sess.run(accr, feed_dict=feeds_test)
# Notes: inspecting tensors with an interactive session.
sess = tf.InteractiveSession()
arr = np.array([[],[],[]])
tf.rank(arr).eval() # rank (number of dimensions) of arr
tf.shape(arr).eval()
tf.argmax(arr, 0).eval() # index of the max value along axis 0 (per column)
一个TensorFlow程序通常可以分成两部分:
第一部分用来构建一个计算图
第二部分来执行这个图
# Phase 1: construct the graph; phase 2: execute it in a session.
w = tf.constant(3)
x = w + 2
y = x + 5
z = x * 3
with tf.Session() as sess:
    print(y.eval())
    print(z.eval())
在图的每次执行期间,所有节点值都会被丢弃,但是变量的值不会。
变量的生命周期从初始化器的执行开始,到关闭会话才结束
TensorFlow中的线性回归
tensorflow 中的操作 可以接受任意数量的输入,也可以产生任意数量的输出。
输入和输出都是多维数组,叫作张量。
首先,获取数据,对所有训练实例添加一个额外的偏置项(x0=1)。接下来,创建两个TensorFlow常量节点X和y,分别保存数据和目标。代码中还使用了TensorFlow提供的矩阵操作来定义theta:这些矩阵相关函数transpose()、matmul()和matrix_inverse()都是自解释的。
import numpy as np
from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing()
m, n = housing.data.shape
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]  # prepend bias column x0 = 1
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
XT = tf.transpose(X)  # fixed: tf.transpose() was called without its argument
# Normal equation: theta = (X^T X)^(-1) X^T y
theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)
with tf.Session() as sess:
    theta_value = theta.eval()
①实现普通神经网络
# Fully-connected neural network: 784 -> 256 -> 128 -> 10.
n_hidden_1 = 256
n_hidden_2 = 128
n_input = 784
n_classes = 10
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
stddev = 0.1
weights = {
    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=stddev)),
    'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=stddev)),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], stddev=stddev)),
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))  # fixed: missing call parentheses
}
print("NETWORK READY")
# Forward-propagation function.
def multilayer_perceptron(_X, _weights, _biases):
    """Forward pass: two sigmoid hidden layers, then a linear output layer."""
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['w1']), _biases['b1']))  # fixed: sigmod/matual typos
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, _weights['w2']), _biases['b2']))
    # fixed: original added the whole _biases dict instead of the 'out' entry
    return tf.matmul(layer_2, _weights['out']) + _biases['out']
# Main: build loss / optimizer / accuracy nodes and run the training loop.
pred = multilayer_perceptron(x, weights, biases)
# TF1 requires keyword arguments here; positional (pred, y) raises an error.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optm = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accr = tf.reduce_mean(tf.cast(corr, "float"))
init = tf.global_variables_initializer()
print("FUNCTIONS READY")
training_epochs = 20
batch_size = 100
display_step = 4
sess = tf.Session()
sess.run(init)
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feeds = {x: batch_xs, y: batch_ys}
        sess.run(optm, feed_dict=feeds)
        avg_cost += sess.run(cost, feed_dict=feeds)
    avg_cost = avg_cost / total_batch
    # DISPLAY
    if (epoch + 1) % display_step == 0:
        print(epoch, training_epochs, avg_cost)
        # Training-set accuracy (on the last mini-batch).
        feeds = {x: batch_xs, y: batch_ys}
        train_acc = sess.run(accr, feed_dict=feeds)
        print("TRAIN ACCURACY:%.3f" % (train_acc))
        # Test-set accuracy.
        feeds = {x: mnist.test.images, y: mnist.test.labels}
        test_acc = sess.run(accr, feed_dict=feeds)  # fixed typo: feed_dice
        print("TEST ACCURACY:%.3f" % (test_acc))
print("OPTIMIZATION FINISHED")
②实现卷积神经网络
# CNN parameters: two 3x3 conv layers (64, 128 filters) and two FC layers.
n_input = 784
n_output = 10
weights = {
    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)),
    'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.1)),
    'wd1': tf.Variable(tf.random_normal([7 * 7 * 128, 1024], stddev=0.1)),
    'wd2': tf.Variable(tf.random_normal([1024, n_output], stddev=0.1))  # fixed: missing colon after 'wd2'
}
biases = {
    'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)),   # fixed stray comma inside tf.Variable(...)
    'bc2': tf.Variable(tf.random_normal([128], stddev=0.1)),
    'bd1': tf.Variable(tf.random_normal([1024], stddev=0.1)),
    'bd2': tf.Variable(tf.random_normal([n_output], stddev=0.1))
}
def conv_basic(_input, _w, _b, _keepratio):
    """Two conv+pool+dropout stages, then two FC layers; returns a dict of outputs."""
    _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1])  # N, H, W, C
    # First conv + pool layer
    _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')
    _conv1 = tf.nn.relu(tf.nn.bias_add(_conv1, _b['bc1']))  # fixed syntax: bias_a,dd -> bias_add
    _pool1 = tf.nn.max_pool(_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    _pool_dr1 = tf.nn.dropout(_pool1, _keepratio)  # randomly drop some activations
    # Second conv + pool layer
    _conv2 = tf.nn.conv2d(_pool_dr1, _w['wc2'], strides=[1, 1, 1, 1], padding='SAME')
    _conv2 = tf.nn.relu(tf.nn.bias_add(_conv2, _b['bc2']))
    _pool2 = tf.nn.max_pool(_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    _pool_dr2 = tf.nn.dropout(_pool2, _keepratio)
    # Fully connected layers
    _dense1 = tf.reshape(_pool_dr2, [-1, _w['wd1'].get_shape().as_list()[0]])
    _fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
    _fc_dr1 = tf.nn.dropout(_fc1, _keepratio)
    _out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
    # Fixed: the original never returned; callers index the result as conv_basic(...)['out'].
    return {'out': _out}
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_output])
keepratio = tf.placeholder(tf.float32)
_pred = conv_basic(x, weights, biases, keepratio)['out']
# TF1 requires keyword arguments for the cross-entropy op.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=_pred, labels=y))
optm = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)  # fixed: optm was used below but never defined
_corr = tf.equal(tf.argmax(_pred, 1), tf.argmax(y, 1))
accr = tf.reduce_mean(tf.cast(_corr, tf.float32))
init = tf.global_variables_initializer()
print("GRAPH READY")
sess = tf.Session()
sess.run(init)
training_epochs = 15
batch_size = 16
display_step = 1  # fixed typo: diaplay_step (display_step is what the loop reads)
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = 10
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optm, feed_dict={x: batch_xs, y: batch_ys, keepratio: 0.7})
        avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keepratio: 1.}) / total_batch
    if epoch % display_step == 0:
        # Fixed: original format string was empty (" " % (...) raises TypeError).
        print("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
        # Fixed: x was mistakenly fed batch_ys.
        train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys, keepratio: 1.})
        print("Training accuracy:%.3f" % (train_acc))
# Inspect the documentation of a library function.
print(help(tf.nn.conv2d))