numpy和pytorch实现梯度下降法
- numpy: import numpy as np
def func(x, y):
    """Rosenbrock function (1 - x)^2 + 100*(y - x^2)^2.

    Its global minimum is 0, attained at (x, y) = (1, 1).
    """
    quadratic_term = (1 - x) ** 2
    valley_term = 100 * (y - x ** 2) ** 2
    return quadratic_term + valley_term
def dz_dx(x, y):
    """Partial derivative of func with respect to x.

    d/dx [(1-x)^2 + 100*(y - x^2)^2] = 2*x - 400*(y - x^2)*x - 2.
    """
    inner = y - x ** 2  # shared factor from the chain rule
    return 2 * x - 400 * inner * x - 2
# Partial derivative of func with respect to y.
# (Fixed: stray backticks before `def` were a paste artifact and a syntax error.)
def dz_dy(x, y):
    """Return d/dy [(1-x)^2 + 100*(y - x^2)^2] = 200*(y - x^2)."""
    return 200 * (y - x**2)
# Gradient descent on the Rosenbrock function.
def Grad(learning_rate, Max_iter):
    """Minimize func by plain gradient descent from the origin.

    Args:
        learning_rate: step size multiplied into the gradient each iteration.
            (Parameter name fixed from the original typo ``learinng_rate``;
            the visible caller passes it positionally.)
        Max_iter: maximum number of iterations.

    Returns:
        np.ndarray of shape (2,): the final (x, y) point.
    """
    value = np.zeros(2)  # start point (0, 0)
    loss = 10.0          # sentinel so the loop body runs at least once
    iter_count = 0
    # Stop when the function value is small enough or the budget runs out.
    while loss > 0.001 and iter_count < Max_iter:
        error = np.zeros(2)
        error[0] = dz_dx(value[0], value[1])
        error[1] = dz_dy(value[0], value[1])
        # Step every coordinate against its partial derivative (vectorized;
        # replaces the original per-index loop with identical arithmetic).
        value = value - learning_rate * error
        loss = func(value[0], value[1])
        print('迭代次数:', iter_count, '损失:', loss)
        iter_count += 1
    return value
if __name__ == '__main__':
    # Step size 0.001, at most 10000 iterations; print the final point.
    result = Grad(0.001, 10000)
    print(result)
- pytorch:
# Training samples for the linear relation y = 2*x (true weight is 2.0).
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
# Model weight, initialized to 1.0 and updated by the SGD loop below.
w = 1.0
def forward(x):
    """Linear model without bias: predict y as w * x using the global weight w."""
    return w * x
def loss(x, y):
    """Squared error between the model's prediction for x and the target y."""
    residual = forward(x) - y
    return residual * residual
def gradient(x, y):
    """Analytic gradient of the squared-error loss with respect to w.

    d/dw (x*w - y)^2 = 2*x*(x*w - y).
    (Fixed: the original ``2x(x*w - y)`` was a syntax error — Python needs
    explicit ``*`` for multiplication.)
    """
    return 2 * x * (x * w - y)
# Prediction for x = 4 before training.
# (Fixed: the original used curly quotes “ ”, which are a SyntaxError in
# Python; string contents are unchanged.)
print("predict (before training)", 4, forward(4))
# Train: one SGD step per sample, 20 epochs.
for epoch in range(20):
    for x, y in zip(x_data, y_data):
        grad = gradient(x, y)
        w = w - 0.01 * grad  # step against the gradient, learning rate 0.01
        print("\t grad: ", x, y, grad)
        l = loss(x, y)
    print("progress:", epoch, l)  # loss on the epoch's last sample
# Prediction for x = 4 after training; w should now be close to 2.0.
print("predict (after training)", 4, forward(4))
numpy和pytorch实现线性回归
还在研究中…
pytorch实现一个简单的神经网络
还在研究中…