# 用 TensorFlow 实现线性回归和相应的损失函数的运用
# (Linear regression in TensorFlow, comparing L2 and L1 loss functions.)
#
# 版权声明:本文为博主原创文章,遵循 CC 4.0 by-sa 版权协议,转载请附上原文出处链接和本声明。
# 本文链接:https://blog.csdn.net/qq_30339595/article/details/79211347
# NOTE(review): "%matplotlib inline" is an IPython/Jupyter magic, not valid
# Python syntax — commented out so this file runs as a plain script.
# %matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from sklearn import datasets
from tensorflow.python.framework import ops

# Start from a clean graph (TF1-style graph/session API).
ops.reset_default_graph()
sess = tf.Session()

# Iris dataset: use petal width (column 3) as the feature and
# sepal length (column 0) as the target.
iris = datasets.load_iris()
x_vals = np.array([x[3] for x in iris.data])
y_vals = np.array([y[0] for y in iris.data])
# Hyperparameters for the L2-loss fit.
learning_rate = 0.05
batch_size = 25

# Placeholders for one mini-batch of scalar features/targets, fed as
# column vectors of shape (batch, 1).
x_data = tf.placeholder(dtype=tf.float32, shape=[None, 1])
y_target = tf.placeholder(dtype=tf.float32, shape=[None, 1])

# Model parameters: slope A and intercept b, randomly initialized.
A = tf.Variable(tf.random_normal(shape=[1, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))

# Linear model: y_hat = x @ A + b  (operator form of tf.add/tf.matmul).
model_output = tf.matmul(x_data, A) + b

# L2 loss: mean squared error over the batch.
loss = tf.reduce_mean(tf.square(y_target - model_output))

# Initialize the variables, then build the gradient-descent training op.
init = tf.global_variables_initializer()
sess.run(init)
my_opt = tf.train.GradientDescentOptimizer(learning_rate)
train_step = my_opt.minimize(loss)
loss_vec = []  # per-step L2 training loss, plotted later
for i in range(100):
    # Draw a random mini-batch (with replacement) and reshape each
    # 1-D slice into a (batch, 1) column vector for the placeholders.
    rand_index = np.random.choice(len(x_vals), size=batch_size)
    rand_x = np.transpose([x_vals[rand_index]])
    rand_y = np.transpose([y_vals[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(temp_loss)
    if (i + 1) % 25 == 0:
        # Fixed: the original print ran the step number, A and b together
        # with no separators ("step #25A = [[...]]b = [[...]]").
        print('step #' + str(i + 1) + ' A = ' + str(sess.run(A)) +
              ' b = ' + str(sess.run(b)))
        print('Loss = ' + str(temp_loss))
# Extract the learned scalar slope/intercept (A and b are [[value]])
# and evaluate the fitted line at every observed petal width.
[slope] = sess.run(A)
[y_intercept] = sess.run(b)
best_fit = [x * slope + y_intercept for x in x_vals]

# Scatter of the data with the fitted regression line.
plt.plot(x_vals, y_vals, 'o', label='Data Points')
plt.plot(x_vals, best_fit, 'r-', label='Best fit line', linewidth=3)
plt.legend(loc="upper left")
plt.title('sepal length vs pedal width')
plt.xlabel('pedal width')
plt.ylabel('sepal length')
plt.show()

# L2 training-loss curve.
plt.plot(loss_vec, 'k-')
plt.title('L2 loss per generation')
plt.xlabel('Generation')
plt.ylabel('L2 Loss')
plt.show()  # fixed: this second figure was never displayed in the original

# Re-fit the same model with an L1 (absolute) loss for comparison.
learning_rate = 0.1
iteration = 50
loss_l1 = tf.reduce_mean(tf.abs(y_target - model_output))

# Re-initialize A and b so the L1 run starts from fresh random values.
init = tf.global_variables_initializer()
sess.run(init)
my_opt_l1 = tf.train.GradientDescentOptimizer(learning_rate)
train_step_l1 = my_opt_l1.minimize(loss_l1)

loss_vec_l1 = []  # per-step L1 training loss
for i in range(iteration):
    rand_index = np.random.choice(len(x_vals), size=batch_size)
    rand_x = np.transpose([x_vals[rand_index]])
    rand_y = np.transpose([y_vals[rand_index]])
    sess.run(train_step_l1, feed_dict={x_data: rand_x, y_target: rand_y})
    temp_loss_l1 = sess.run(loss_l1, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec_l1.append(temp_loss_l1)
    if (i + 1) % 25 == 0:
        print('step #' + str(i + 1) + ' A = ' + str(sess.run(A)) +
              ' b = ' + str(sess.run(b)))
        # Fixed: the original printed temp_loss — the stale L2 value left
        # over from the previous training loop — instead of the current
        # L1 loss.
        print('Loss = ' + str(temp_loss_l1))

# Compare the two training curves on one figure.
plt.plot(loss_vec, 'k-', label='L2 loss')
plt.plot(loss_vec_l1, 'r--', label='L1 loss')
plt.title('L1 vs L2 loss per generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.legend(loc='upper right')  # fixed: labels were set but legend never drawn
plt.show()                     # fixed: comparison figure was never displayed

# (CSDN page chrome left over from scraping — commented out so the file parses.)
# 展开阅读全文
# 没有更多推荐了,返回首页