import torch
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Run in TF1 graph mode (ignore the v1/v2 differences).
tf.compat.v1.disable_eager_execution()

# ----- Generate synthetic data: points scattered around y = 0.1*x + 0.3 -----
num = 1000
vector_set = []
for i in range(num):
    x1 = np.random.normal(0.0, 0.55)
    # Target line y = 0.1*x + 0.3, plus small Gaussian noise.
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)
    vector_set.append([x1, y1])
x_data = [v[0] for v in vector_set]
y_data = [v[1] for v in vector_set]

# Visualize the generated points.
plt.scatter(x_data, y_data, c='r')
plt.show()
# ----- Build the linear model graph: y = W*x + b -----
# Weight starts from a uniform random value in [-1, 1); bias starts at zero.
W = tf.Variable(tf.random.uniform([1], -1.0, 1.0), name="W")
b = tf.Variable(tf.zeros([1]), name="b")
y = W * x_data + b

# Mean squared error between predictions and the generated targets.
loss = tf.reduce_mean(tf.square(y - y_data), name="loss")

# Gradient-descent optimizer with learning rate 0.5; `train` is the
# op that performs one minimization step on `loss`.
opt = tf.compat.v1.train.GradientDescentOptimizer(0.5)
train = opt.minimize(loss, name='train')
# ----- Train: run 20 gradient-descent steps in a TF1 session -----
sess = tf.compat.v1.Session()
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
# print('W=', sess.run(W))  # uncomment to inspect the initial weight
for i in range(20):
    sess.run(train)  # one optimizer step
    print("loss=", sess.run(loss))
# Release session resources (the original never closed the session).
sess.close()
# TensorFlow linear-regression practice (getting familiar with tf).