This article walks through implementing regression with gradient descent in Python.
# Define the data
import numpy as np
import random

def genData(numPoints, bias, variance):
    x = np.zeros(shape=(numPoints, 2))
    y = np.zeros(shape=numPoints)
    for i in range(0, numPoints):
        x[i][0] = 1  # the first column of every row is 1 (the bias term)
        x[i][1] = i  # the second column of every row is i
        y[i] = (i + bias) + random.uniform(0, 1) * variance  # noisy target, one value per row
    return x, y
x, y = genData(100, 25, 10)
print('x:', x)
print('y:', y)
Check the shapes of x and y:
m, n = np.shape(x)
print('x shape:', m, n)
n_y = np.shape(y)
print('y shape:', n_y)
Next, let's write the gradient descent algorithm:
def gradientDescent(x, y, theta, alpha, m, numIterations):  # m: number of training examples
    xTrans = x.transpose()
    for i in range(0, numIterations):
        hypothesis = np.dot(x, theta)
        loss = hypothesis - y
        cost = np.sum(loss ** 2) / (2 * m)
        print('Iteration %d / Cost: %f' % (i, cost))
        gradient = np.dot(xTrans, loss) / m
        theta = theta - alpha * gradient  # update rule
    return theta
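The update implemented above is batch gradient descent on the mean squared error cost. In matrix form, with X the m-by-2 data matrix and y the targets, the three key lines of the loop correspond to:

J(\theta) = \frac{1}{2m} (X\theta - y)^\top (X\theta - y)

\nabla_\theta J = \frac{1}{m} X^\top (X\theta - y)

\theta \leftarrow \theta - \alpha \, \nabla_\theta J

The cost line computes J(theta), the gradient line computes the gradient above, and the update rule moves theta a step of size alpha against the gradient.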
The complete code:
import numpy as np
import random

def gradientDescent(x, y, theta, alpha, m, numIterations):  # alpha: learning rate; m: number of examples; numIterations: iteration count
    xTrans = x.transpose()  # transpose of the data matrix
    for i in range(0, numIterations):
        hypothesis = np.dot(x, theta)  # predictions: inner product of x and theta
        loss = hypothesis - y  # residuals (prediction errors)
        cost = np.sum(loss ** 2) / (2 * m)  # mean squared error cost
        print('Iteration %d / Cost: %f' % (i, cost))
        gradient = np.dot(xTrans, loss) / m
        theta = theta - alpha * gradient  # update rule
    return theta

def genData(numPoints, bias, variance):
    x = np.zeros(shape=(numPoints, 2))
    y = np.zeros(shape=numPoints)
    for i in range(0, numPoints):
        x[i][0] = 1  # the first column of every row is 1 (the bias term)
        x[i][1] = i  # the second column of every row is i
        y[i] = (i + bias) + random.uniform(0, 1) * variance  # noisy target, one value per row
    return x, y

x, y = genData(100, 25, 10)
print('x:', x)
print('y:', y)
m, n = np.shape(x)
print('x shape:', m, n)
n_y = np.shape(y)
print('y shape:', n_y)
numIterations = 10000
alpha = 0.005
theta = np.ones(n)
theta = gradientDescent(x, y, theta, alpha, m, numIterations)
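As a quick sanity check (an addition, not part of the original article), the theta found by gradient descent can be compared with NumPy's closed-form least-squares solution; the two should agree closely:

theta_exact, *_ = np.linalg.lstsq(x, y, rcond=None)  # closed-form least-squares fit
print('gradient descent:', theta)
print('least squares:', theta_exact)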