PyTorch Task 2

This post walks through gradient descent implemented in both NumPy and PyTorch, compares NumPy and PyTorch versions of linear regression, and then builds a simple neural network in PyTorch.

Tasks:

  1. Implement gradient descent with NumPy and PyTorch: set an initial value, compute the gradient, and update the parameters along the gradient direction (a minimal one-variable sketch of these three steps follows this list).
  2. Implement linear regression with NumPy and PyTorch.
  3. Implement a simple neural network with PyTorch.
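
Before the full examples, here is a minimal sketch of those three steps on the one-variable function f(x) = x**2 (my own illustration; the function, starting point, and step count are arbitrary):

# gradient descent in three steps: initialize, compute the gradient, update
x = 5.0                            # 1. set an initial value
learning_rate = 0.1
for _ in range(100):
    grad = 2 * x                   # 2. gradient of f(x) = x**2
    x = x - learning_rate * grad   # 3. step along the negative gradient
print(x)                           # converges toward the minimizer x = 0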

1. Gradient descent with NumPy and PyTorch

  • NumPy implementation of gradient descent
import numpy as np

# objective function (the Rosenbrock function, minimum at (1, 1))
def func(x, y):
    return (1 - x)**2 + 100*(y - x**2)**2

# partial derivative with respect to x
def dz_dx(x, y):
    return 2*x - 400*(y - x**2)*x - 2

# partial derivative with respect to y
def dz_dy(x, y):
    return 200*(y - x**2)

# gradient descent
def Grad(learning_rate, Max_iter):
    value = np.zeros(2)  # initial value
    loss = 10.0
    iter_count = 0
    # stop once the function value is small enough or the iteration limit is reached
    while loss > 0.001 and iter_count < Max_iter:
        error = np.zeros(2)
        error[0] = dz_dx(value[0], value[1])
        error[1] = dz_dy(value[0], value[1])
        # with both partial derivatives computed, update each coordinate
        for i in range(2):
            value[i] = value[i] - learning_rate*error[i]
        loss = func(value[0], value[1])
        print('iteration:', iter_count, 'loss:', loss)
        iter_count += 1
    return value

if __name__ == '__main__':
    print(Grad(0.001, 10000))
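
As a quick sanity check (my own addition, not in the original post), the analytic partial derivatives can be compared against a central finite-difference approximation; run this after the listing above so that func, dz_dx, and dz_dy are defined, with an arbitrary test point:

def numeric_grad(f, x, y, eps=1e-6):
    # central finite differences for df/dx and df/dy
    gx = (f(x + eps, y) - f(x - eps, y)) / (2*eps)
    gy = (f(x, y + eps) - f(x, y - eps)) / (2*eps)
    return gx, gy

x0, y0 = 0.5, -0.3
print(numeric_grad(func, x0, y0))      # numerical gradient, roughly (109, -110)
print(dz_dx(x0, y0), dz_dy(x0, y0))    # analytic gradient for comparison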
  • PyTorch implementation of gradient descent
import torch

x = torch.tensor([1.0], requires_grad=True)
print("grad", x.grad, "data", x.data)
learning_rate = 0.00001
epochs = 10
for epoch in range(epochs):
    y = x**2 + 2*x + 1   # forward pass: y = (x + 1)**2
    y.backward()         # autograd computes dy/dx = 2x + 2
    print("grad", x.grad)
    # step along the negative gradient
    x.data = x.data - learning_rate * x.grad
    # clear the accumulated gradient before the next iteration
    x.grad.data.zero_()

if __name__ == '__main__':
    print(x.data)
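
The same loop can be written with PyTorch's built-in optimizer instead of modifying x.data by hand; this is a minimal sketch assuming the same objective and learning rate:

import torch

x = torch.tensor([1.0], requires_grad=True)
optimizer = torch.optim.SGD([x], lr=0.00001)

for epoch in range(10):
    optimizer.zero_grad()    # clear old gradients
    y = x**2 + 2*x + 1       # forward pass
    y.backward()             # populate x.grad
    optimizer.step()         # x <- x - lr * x.grad
print(x.data)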

2. Linear regression with NumPy and PyTorch

  • NumPy implementation of linear regression
import numpy as np
import matplotlib.pyplot as plt
def get_fake_data(batch_size=8):
    '''Generate random data: y = x*2 + 3, plus some noise'''
    x = np.random.rand(batch_size, 1) * 5
    y = x * 2 + 3 + np.random.rand(batch_size, 1)*2
    return x, y
 
def get_gradient(theta,x,y):
    m=x.shape[0]
    Y_estimate=np.dot(x,theta)
    assert (Y_estimate.shape==(m,))
    error=Y_estimate-y
    assert (error.shape==(m,))
    cost =1.0/(2*m)*np.sum(error**2)
    #grad=(1.0/m)*np.dot(x.T,error).reshape(-1)#(2,)
    grad = (1.0 / m) * np.dot(error,x) # (2,)
    return grad,cost
def gradient_descent(x,y,iterations,alpha):
 
    theta=np.random.randn(2)
    costs=[]
    for i in range(iterations):
        grad,cost=get_gradient(theta,x,y)
        new_theta=theta-alpha*grad
        if i%100==0:
            print('{} iterations cost={}'.format(i,cost))
            costs.append(cost)
        theta=new_theta
    return costs,theta
 
def vis_data():
    # take a look at the generated x-y distribution
    x, y = get_fake_data(batch_size=16)
    print(x.shape)
    print(y.shape)
    plt.scatter(np.squeeze(x), np.squeeze(y))
    plt.show()
if __name__=='__main__':
    batch_size=32
    data_x, data_y = get_fake_data(batch_size=batch_size)
    # append a column of ones; the corresponding theta entry acts as the bias b
    data_x=np.hstack((data_x,np.ones_like(data_x)))#(m,2)
    print(data_x)
    print(data_x.shape)
 
    costs,theta=gradient_descent(data_x,np.squeeze(data_y),iterations=50000,alpha=0.002)
    print(data_y.shape)
 
    #print(theta)
    y_predict=np.dot(data_x,theta)#theta[0]+theta[1]*data_x[:,1]
    print(y_predict.shape)
    plt.figure()
    # scatter plot of the samples
    print(data_x[:2])
    plt.scatter(data_x[:,0],np.squeeze(data_y),c='red')
    plt.plot(data_x[:,0],y_predict)
    plt.show()
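
As a cross-check (my own addition), the gradient-descent estimate of theta can be compared with the closed-form least-squares solution from np.linalg.lstsq; the data generation mirrors get_fake_data above:

import numpy as np

x = np.random.rand(32, 1) * 5
y = x * 2 + 3 + np.random.rand(32, 1) * 2
X = np.hstack((x, np.ones_like(x)))    # same (m, 2) design matrix as above

theta_ls, *_ = np.linalg.lstsq(X, y.squeeze(), rcond=None)
print(theta_ls)   # roughly [2, 4]: slope 2, intercept 3 plus the mean of the uniform noise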
  • PyTorch implementation of linear regression
import numpy as np
import matplotlib.pyplot as plt
import torch as t
 
device=t.device('cpu')
 
def get_fake_data(batch_size=8):
    '''Generate random data: y = x*2 + 3, plus some noise'''
    x = t.rand(batch_size, 1,device=device) * 5
    y = x * 2 + 3 + t.rand(batch_size, 1)*2
    return x, y
 
def vis_data():
    # take a look at the generated x-y distribution
    x, y = get_fake_data(batch_size=16)
    print(x.shape)
    print(y.shape)
    plt.scatter(np.squeeze(x), np.squeeze(y))
    plt.show()
if __name__=='__main__':
    # vis_data()
 
    m=batch_size=32
    data_x, data_y = get_fake_data(batch_size=batch_size)
    # append a column of ones; the corresponding theta entry acts as the bias b
    data_x = t.cat((data_x, t.ones_like(data_x)), dim=1)  # (m, 2)
    print(data_x.shape)
 
    theta = t.randn((2, 1),requires_grad=True)
    iterations=500
    lr = 0.005  # learning rate
    losses=[]
    for i in range(iterations):
        # forward: compute the loss
        y_pred = data_x.mm(theta)
        loss = 1/(2*m) * (y_pred - data_y) ** 2
        loss = loss.sum()
        if i == 0:
            print('y_pred shape:', y_pred.shape, 'loss shape:', loss.shape)
        losses.append(loss.item())

        # backward: autograd computes the gradient of the loss w.r.t. theta
        loss.backward()

        # update the parameters
        theta.data.sub_(lr * theta.grad.data)

        # zero the gradient
        theta.grad.data.zero_()
    print('losses=',losses)
    # plot the fitted line against the samples
    plt.scatter(np.squeeze(data_x[:,0]), np.squeeze(data_y), c='red')
    y_predict = data_x.mm(theta)
    print('y_predict.shape', y_predict.shape)
    print(data_x.detach().numpy())
    plt.plot(data_x.detach().numpy()[:,0], y_predict.detach().numpy())  # predicted
    plt.show()
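
For comparison, here is a minimal sketch of the same regression written with torch.nn.Linear and torch.optim.SGD (my own addition; the module handles the bias term itself, so no column of ones is needed):

import torch
import torch.nn as nn

# fake data mirroring get_fake_data above: y = 2x + 3 + noise
x = torch.rand(32, 1) * 5
y = x * 2 + 3 + torch.rand(32, 1) * 2

model = nn.Linear(1, 1)   # one weight and one bias
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.05)

for i in range(500):
    optimizer.zero_grad()
    loss = criterion(model(x), y)   # forward pass
    loss.backward()                 # autograd
    optimizer.step()                # gradient-descent update

# should approach roughly weight 2 and bias 4 (3 plus the mean of the uniform noise)
print(model.weight.item(), model.bias.item())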

3. A simple neural network with PyTorch

# a simple three-layer network (input -> hidden -> output) trained by hand
import torch
n_input, n_hidden, n_output = 5, 3, 1
# parameter initialization
x = torch.randn((1, n_input))
y = torch.randn((1, n_output))
w1 = torch.randn(n_input, n_hidden)
w2 = torch.randn(n_hidden, n_output)
b1 = torch.randn((1, n_hidden))
b2 = torch.randn((1, n_output))
# forward pass
def sigmoid_activation(z):
    return 1 / (1 + torch.exp(-z))
z1 = torch.mm(x, w1) + b1
a1 = sigmoid_activation(z1)
z2 = torch.mm(a1, w2) + b2
output = sigmoid_activation(z2)
# backward pass: prediction error and layer deltas
loss = y - output   # prediction error driving the updates
def sigmoid_delta(x):
    return x * (1 - x)   # derivative of the sigmoid expressed via its output
delta_output = sigmoid_delta(output)
delta_hidden = sigmoid_delta(a1)
d_outp = loss * delta_output
loss_h = torch.mm(d_outp, w2.t())
d_hidn = loss_h * delta_hidden
# parameter update
learning_rate = 0.1
w2 += torch.mm(a1.t(), d_outp) * learning_rate
w1 += torch.mm(x.t(), d_hidn) * learning_rate
b2 += d_outp.sum() * learning_rate
b1 += d_hidn.sum() * learning_rate
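
The same network can rely on autograd instead of hand-coded deltas; below is a minimal sketch of one equivalent training step using nn.Sequential and an MSE loss (both are my assumptions, not from the original post):

import torch
import torch.nn as nn

n_input, n_hidden, n_output = 5, 3, 1
x = torch.randn(1, n_input)
y = torch.randn(1, n_output)

model = nn.Sequential(
    nn.Linear(n_input, n_hidden),
    nn.Sigmoid(),
    nn.Linear(n_hidden, n_output),
    nn.Sigmoid(),
)
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

optimizer.zero_grad()
loss = criterion(model(x), y)   # forward pass
loss.backward()                 # autograd replaces the manual delta computation
optimizer.step()                # gradient-descent update of all weights and biases
print(loss.item())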
