Source website: https://github.com/aymericdamien/TensorFlow-Examples/
主要介绍下近邻分类,线性回归,逻辑回归
线性回归
简单来说,就是由一堆数据需要用一个线性函数拟合它们
大致过程就是用梯度下降算法最小化损失函数
#coding=utf-8
#线性回归
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random
# Parameters
learning_rate = 0.01#学习率
training_epochs = 2000#迭代次数
display_step = 50#显示频率
# Training Data原始序列
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]
X = tf.placeholder("float")
Y = tf.placeholder("float")
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# 创建一个线性模型
activation = tf.add(tf.mul(X, W), b)
# Minimize the squared errors
cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss?
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
for epoch in range(training_epochs):
# for (x, y) in zip(train_X, train_Y):
# sess.run(optimizer, feed_dict={X: x, Y: y})
#原github是这样写的,但我认为没必要,欢迎评论指正
sess.run(optimizer,feed_dict={X:train_X,Y:train_Y})
#Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
"W=", sess.run(W), "b=", sess.run(b)
print "Optimization Finished!"
print "cost=", sess.run(cost, feed_dict={X: train_X, Y: train_Y}),"W=", sess.run(W), "b=", sess.run(b)
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
#o表示数据点用圆表示,且不连线,ro表示红色圆圈
#-数据点用实现连接
plt.legend()#绘制图例
plt.show()
再看一个线性例子
#coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
nump_points = 1000
vectors_set = []
lr=0.5#学习率
epoch=50
for i in xrange(nump_points):
xl=np.random.normal(0.0,0.55)
yl=xl*0.1+0.3+np.random.normal(0.0,0.03)#生成线性数据
vectors_set.append([xl,yl])
x_data=[v[0] for v in vectors_set]
y_data=[v[1] for v in vectors_set]
W=tf.Variable(tf.random_uniform([1],-1.0,1.0))
b=tf.Variable(tf.zeros([1]))
y=W*x_data+b
loss=tf.reduce_mean(tf.square(y-y_data))
optimizer=tf.train.GradientDescentOptimizer(lr).minimize(loss)
init=tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
for step in range(epoch):
sess.run(optimizer)
if step%5==0:#每隔五次显示一次
print "W=",sess.run(W),"loss==",sess.run(loss)
print sess.run(W),sess.run(b)
plt.figure(1)
plt.title('src')
plt.plot(x_data, y_data, 'ro', label='Original data')
plt.legend()
plt.figure(2)
plt.title('dst')
plt.plot(x_data, y_data, 'ro', label='Original data')
plt.plot(x_data, sess.run(W) * x_data + sess.run(b))
plt.show()