"""Multivariate linear regression (two features) fit by batch gradient descent.

Reads house data from a CSV (columns: mianji = area, woshi = bedrooms,
jiage = price — TODO confirm column semantics against the dataset) and fits
the model  jiage ≈ a0 + a1*mianji + a2*woshi.
"""
from numpy import genfromtxt  # NOTE(review): unused — kept to preserve the file's imports
import pandas as pd


def loss_error(a0, a1, a2, x0, x1, y):
    """Return the half-mean-squared-error cost of the linear model.

    a0, a1, a2 -- intercept and the two feature coefficients
    x0, x1     -- feature vectors (equal length)
    y          -- target vector

    Cost = sum((a0 + a1*x0[i] + a2*x1[i] - y[i])^2) / (2*m).
    """
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    m = float(len(x0))
    for i in range(len(x0)):
        total += ((a0 + a1 * x0[i] + a2 * x1[i]) - y[i]) ** 2
    return total / (m * 2)


def train(a0, a1, a2, x0, x1, y, alpha, epochs):
    """Run `epochs` iterations of batch gradient descent; return (a0, a1, a2).

    alpha  -- learning rate (kept small to avoid overflow in the updates)
    epochs -- number of full passes over the data
    """
    m = float(len(x0))
    for _ in range(epochs):
        a0_gradient = 0
        a1_gradient = 0
        a2_gradient = 0
        for j in range(len(x0)):
            # Hoist the shared residual instead of recomputing it three times.
            err = (a0 + a1 * x0[j] + a2 * x1[j]) - y[j]
            a0_gradient += err
            a1_gradient += x0[j] * err
            a2_gradient += x1[j] * err
        # Simultaneous update: all gradients above used the old parameter values.
        a0 = a0 - (alpha * (1 / m) * a0_gradient)
        a1 = a1 - (alpha * (1 / m) * a1_gradient)
        a2 = a2 - (alpha * (1 / m) * a2_gradient)
    return a0, a1, a2


if __name__ == "__main__":
    # Script entry point: guarded so the module can be imported without
    # triggering the hard-coded-path CSV read.
    data = pd.read_csv("D:\\boshidun.csv")
    x0 = data["mianji"].values.astype(int)
    x1 = data["woshi"].values.astype(int)
    y = data["jiage"].values.astype(int)

    # Initial parameters theta0, theta1, theta2.
    a0 = 0
    a1 = 0
    a2 = 0
    # Learning rate and iteration count; small alpha guards against
    # "overflow encountered in multiply" on this dataset's scale.
    alpha = 0.00001
    epochs = 2000

    print("loss_error:", loss_error(a0, a1, a2, x0, x1, y))
    a0, a1, a2 = train(a0, a1, a2, x0, x1, y, alpha, epochs)  # fitted parameters
    print("赋予新的参数后得到的新的loss_error:", loss_error(a0, a1, a2, x0, x1, y))