02 PyTorch in Practice: Linear Regression

import torch
import matplotlib.pyplot as plt
# prepare the data
x = torch.Tensor([1.4, 5, 11, 16, 21])
y = torch.Tensor([14.4, 29.6, 62, 85.5, 113.4])
# draw a scatter plot of the raw data
plt.scatter(x.numpy(), y.numpy())
plt.show()

Simple linear regression

import torch
import matplotlib.pyplot as plt

# Build the design matrix X of shape [5, 2]: each row is [x_i, 1];
# the column of ones multiplies the bias term
def Produce_X(x):
    x0 = torch.ones_like(x)
    X = torch.stack((x, x0), dim=1)
    return X


x = torch.Tensor([1.4, 5, 11, 16, 21])
y = torch.Tensor([14.4, 29.6, 62, 85.5, 113.4])
X = Produce_X(x)
inputs = X
target = y
# initialize the weight vector w of shape [2] (slope and intercept)
w = torch.rand(2, requires_grad=True)
print(X)
print(w)
tensor([[ 1.4000,  1.0000],
        [ 5.0000,  1.0000],
        [11.0000,  1.0000],
        [16.0000,  1.0000],
        [21.0000,  1.0000]])
tensor([0.2637, 0.4283], requires_grad=True)
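
The matrix-vector product inputs.mv(w), used in the train() function below, computes w[0]*x_i + w[1] for every row, i.e. a line with slope w[0] and intercept w[1]. Before training by gradient descent we can sanity-check against the closed-form least-squares solution (a minimal sketch, assuming a reasonably recent PyTorch that provides torch.linalg.lstsq):

import torch

x = torch.Tensor([1.4, 5, 11, 16, 21])
y = torch.Tensor([14.4, 29.6, 62, 85.5, 113.4])
X = torch.stack((x, torch.ones_like(x)), dim=1)

# closed-form least squares: argmin_w ||X w - y||^2
solution = torch.linalg.lstsq(X, y.unsqueeze(1)).solution
print(solution.squeeze())  # the slope and intercept gradient descent should approach
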
def draw(output, loss):
    plt.cla()  # clear the figure
    plt.scatter(x.numpy(), y.numpy())
    plt.plot(x.numpy(), output.data.numpy(), 'r-', lw=5)
    plt.text(1, 0, 'loss=%s' % (loss.item()),
             fontdict={'size': 20, 'color': 'red'})
    plt.pause(0.005)
def train(epochs=1, learn_rate=0.01):
    for epoch in range(epochs):
        output = inputs.mv(w)
        loss = (output - target).pow(2).sum()
        loss.backward()
        w.data -= learn_rate * w.grad
        w.grad.zero_()  # reset the accumulated gradient to zero for the next step
        if epoch % 100 == 0:
            draw(output, loss)  # redraw the current fit every 100 epochs
    return w, loss


w, loss = train(1000, learn_rate=1e-4)
print("final loss:", loss.item())
print("weights:", w.data)

(training plots omitted; several frames elided)

final loss: nan
weights: tensor([nan, nan], device='cuda:0')

Working with a large dataset

import torch
import matplotlib.pyplot as plt


# Build the design matrix X of shape [100000, 2]: each row is [x_i, 1]
def Produce_X(x):
    x0 = torch.ones_like(x)
    X = torch.stack((x, x0), dim=1)
    return X


x = torch.linspace(-3, 3, 100000)
y = x + 1.2 * torch.rand(x.size())  # noisy line: slope 1, noise uniform in [0, 1.2)
X = Produce_X(x)
inputs = X
target = y
# initialize the weight vector w of shape [2]
w = torch.rand(2, requires_grad=True)


def draw(output, loss):
    plt.cla()  # clear the figure
    plt.scatter(x.numpy(), y.numpy(), s=0.001)
    plt.plot(x.numpy(), output.data.numpy(), 'r-', lw=5)
    plt.text(0.5, 0, 'loss=%s' % (loss.item()),
             fontdict={'size': 20, 'color': 'red'})
    plt.pause(0.005)


def train(epochs=1, learn_rate=0.01):
    for epoch in range(epochs):
        output = inputs.mv(w)
        loss = (output - target).pow(2).sum()
        loss.backward()
        w.data -= learn_rate * w.grad
        w.grad.zero_()  # reset the accumulated gradient to zero for the next step
        if epoch % 80 == 0:
            draw(output, loss)  # redraw the current fit every 80 epochs
    return w, loss


w, loss = train(1000, learn_rate=1e-4)
print("final loss:", loss.item())
print("weights:", w.data)

(training plots omitted)

Why do both training runs end in nan? It is not a memory overflow but a numerical one: the loss here is a sum of squared errors, so its gradient scales with both the data values and the sample count. When the learning rate is too large for that scale, each update overshoots, the weights grow without bound, overflow to inf, and the loss becomes nan. Averaging the loss over the samples, as the CUDA version below does by dividing by 100000, keeps the gradients at a sane scale.

final loss: nan
weights: tensor([nan, nan])
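
A minimal sketch of the fix, using the same synthetic data as above: switching from a summed to a mean loss makes the gradient scale independent of the sample count, so the update no longer blows up.

import torch

x = torch.linspace(-3, 3, 100000)
y = x + 1.2 * torch.rand(x.size())
X = torch.stack((x, torch.ones_like(x)), dim=1)
w = torch.rand(2, requires_grad=True)

for epoch in range(1000):
    loss = (X.mv(w) - y).pow(2).mean()  # mean instead of sum
    loss.backward()
    with torch.no_grad():
        w -= 1e-2 * w.grad  # a far larger step is now stable
        w.grad.zero_()
print(loss.item())  # decreases steadily instead of overflowing to nan
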

Training with CUDA

print(torch.cuda.is_available())      # is CUDA available?

print(torch.cuda.device_count())      # number of GPUs

print(torch.cuda.get_device_name(0))  # name of the GPU; device indices start at 0

print(torch.cuda.current_device())    # index of the current device
True
1
GeForce MX250
0
import torch
import matplotlib.pyplot as plt


# Build the design matrix X of shape [100000, 2]: each row is [x_i, 1]
def Produce_X(x):
    x0 = torch.ones_like(x)
    X = torch.stack((x, x0), dim=1)
    return X


x = torch.linspace(-3, 3, 100000)
y = x + 1.2 * torch.rand(x.size())
X = Produce_X(x)
# inputs = X
# target = y
# initialize the weight vector w of shape [2]; requires_grad is set after the device move
w = torch.rand(2)

# --------------cuda-----------------
CUDA = torch.cuda.is_available()
if CUDA:
    inputs = X.cuda()
    target = y.cuda()
    w = w.cuda()
else:
    inputs = X
    target = y
w.requires_grad = True


# --------------cuda-----------------
def draw(output, loss):
    plt.cla()  # clear the figure
    if CUDA:
        output = output.cpu()  # move back to the CPU for plotting
    plt.scatter(x.numpy(), y.numpy(), s=0.001)
    plt.plot(x.numpy(), output.data.numpy(), 'r-', lw=5)
    plt.text(0.5, 0, 'loss=%s' % (loss.item()),
             fontdict={'size': 20, 'color': 'red'})
    plt.pause(0.005)


def train(epochs=1, learn_rate=0.01):
    for epoch in range(epochs):
        output = inputs.mv(w)
        loss = (output - target).pow(2).sum() / 100000  # dividing by the sample count makes this a mean squared error
        loss.backward()
        w.data -= learn_rate * w.grad
        w.grad.zero_()  # reset the accumulated gradient to zero for the next step
        if epoch % 100 == 0:
            draw(output, loss)  # redraw the current fit every 100 epochs
    return w, loss


w, loss = train(2000, learn_rate=1e-4)
print("final loss:", loss.item())
print("weights:", w.data)

(training plots omitted)

final loss: 0.17435269057750702
weights: tensor([0.8681, 0.6192], device='cuda:0')
a = torch.rand(2)
print(a)
print(a.device)
a = a.cuda()
print(a.device)
tensor([0.7874, 0.3465])
cpu
cuda:0
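
The quick check above shows that .cuda() returns a copy of the tensor on the GPU. A more device-agnostic idiom (a sketch, assuming PyTorch 0.4 or later) picks the device once and moves everything with .to(), so the same script runs with or without a GPU:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

x = torch.linspace(-3, 3, 100000)
X = torch.stack((x, torch.ones_like(x)), dim=1).to(device)
w = torch.rand(2, device=device, requires_grad=True)  # created directly on the device, still a leaf
print(X.device, w.device)
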

Timing the training run with perf_counter

import torch
import matplotlib.pyplot as plt
from time import perf_counter
# pin the run to GPU 0
torch.cuda.set_device(0)
# Build the design matrix X of shape [100000, 2]: each row is [x_i, 1]


def Produce_X(x):
    x0 = torch.ones_like(x)
    X = torch.stack((x, x0), dim=1)
    return X


x = torch.linspace(-3, 3, 100000)
y = x + 1.2 * torch.rand(x.size())
X = Produce_X(x)
# inputs = X
# target = y
# initialize the weight vector w of shape [2]
w = torch.rand(2)

# --------------cuda-----------------
CUDA = torch.cuda.is_available()
if CUDA:
    inputs = X.cuda()
    target = y.cuda()
    w = w.cuda()
else:
    inputs = X
    target = y
w.requires_grad = True


# --------------cuda-----------------
def draw(output, loss):
    plt.cla()  # clear the figure
    if CUDA:
        output = output.cpu()  # move back to the CPU for plotting
    plt.scatter(x.numpy(), y.numpy(), s=0.001)
    plt.plot(x.numpy(), output.data.numpy(), 'r-', lw=5)
    plt.text(0.5, 0, 'loss=%s' % (loss.item()),
             fontdict={'size': 20, 'color': 'red'})
    plt.pause(0.005)


def train(epochs=1, learn_rate=0.01):
    for epoch in range(epochs):
        output = inputs.mv(w)
        loss = (output - target).pow(2).sum() / 100000  # mean squared error
        loss.backward()
        w.data -= learn_rate * w.grad
        w.grad.zero_()  # reset the accumulated gradient to zero for the next step
        if epoch % 100 == 0:
            draw(output, loss)  # redraw the current fit every 100 epochs
    return w, loss


start = perf_counter()
w, loss = train(5000, learn_rate=1e-4)
finish = perf_counter()
time = finish - start
print("计算时间:%s" % time)
print("final loss:", loss.item())
print("weights:", w.data)

(training plots omitted)

Training time: 32.38653709999926
final loss: 0.12629716098308563
weights: tensor([0.9559, 0.6240], device='cuda:0')
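
One caveat when timing GPU code: CUDA kernels are launched asynchronously, so perf_counter can stop the clock before the GPU has actually finished. Here loss.item() forces a synchronization every epoch, so the number above is meaningful, but in general the measurement should be bracketed explicitly (a sketch):

import torch
from time import perf_counter

if torch.cuda.is_available():
    torch.cuda.synchronize()  # drain pending GPU work before starting the clock
start = perf_counter()
# ... run the training loop here ...
if torch.cuda.is_available():
    torch.cuda.synchronize()  # wait for queued kernels before stopping the clock
print("Training time: %s" % (perf_counter() - start))

For comparison, here is the same script run purely on the CPU, with the CUDA-specific code commented out:
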
import torch
import matplotlib.pyplot as plt
from time import perf_counter

# Build the design matrix X of shape [100000, 2]: each row is [x_i, 1]


def Produce_X(x):
    x0 = torch.ones_like(x)
    X = torch.stack((x, x0), dim=1)
    return X


x = torch.linspace(-3, 3, 100000)
y = x + 1.2 * torch.rand(x.size())
X = Produce_X(x)
# inputs = X
# target = y
# initialize the weight vector w of shape [2]
w = torch.rand(2)

# --------------cuda-----------------
# CUDA = torch.cuda.is_available()
# if CUDA:
#     inputs = X.cuda()
#     target = y.cuda()
#     w = w.cuda()
#     w.requires_grad = True
# else:
inputs = X
target = y
w.requires_grad = True


# --------------cuda-----------------
def draw(output, loss):
    plt.cla()  # clear the figure
#     if CUDA:
#         output = output.cpu()  # move back to the CPU for plotting
    plt.scatter(x.numpy(), y.numpy(), s=0.001)
    plt.plot(x.numpy(), output.data.numpy(), 'r-', lw=5)
    plt.text(0.5, 0, 'loss=%s' % (loss.item()),
             fontdict={'size': 20, 'color': 'red'})
    plt.pause(0.005)


def train(epochs=1, learn_rate=0.01):
    for epoch in range(epochs):
        output = inputs.mv(w)
        loss = (output - target).pow(2).sum() / 100000  # mean squared error
        loss.backward()
        w.data -= learn_rate * w.grad
        w.grad.zero_()  # reset the accumulated gradient to zero for the next step
        if epoch % 100 == 0:
            draw(output, loss)  # redraw the current fit every 100 epochs
    return w, loss


start = perf_counter()
w, loss = train(5000, learn_rate=1e-4)
finish = perf_counter()
time = finish - start
print("计算时间:%s" % time)
print("final loss:", loss.item())
print("weights:", w.data)

(training plots omitted)

Training time: 36.91037469999992
final loss: 0.14871178567409515
weights: tensor([0.9556, 0.4466])
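
The gap is modest on this laptop-class GPU: about 32.4 s on the GeForce MX250 versus 36.9 s on the CPU. With only two parameters, most of each epoch is Python overhead and the periodic matplotlib redraw rather than arithmetic; the payoff from CUDA grows with model and batch size.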

An artificial neuron: nn.Linear

# imports
import torch
import matplotlib.pyplot as plt
from torch import nn, optim
from time import perf_counter

x = torch.linspace(-3, 3, 100000)
x = torch.unsqueeze(x, dim=1)  # add a dimension at index 1: x becomes [100000, 1], the 2-D shape nn.Linear expects
y = x + 1.2 * torch.rand(x.size())
print(x.size())


# the regression model as an nn.Module
class LineRegression(nn.Module):
    def __init__(self):
        super(LineRegression, self).__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        out = self.linear(x)
        return out


CUDA = torch.cuda.is_available()
if CUDA:
    LR_model = LineRegression().cuda()
    inputs = x.cuda()
    target = y.cuda()
else:
    LR_model = LineRegression()
    inputs = x
    target = y

criterion = nn.MSELoss()  # mean squared error, the same loss we wrote by hand above
optimizer = optim.SGD(LR_model.parameters(), lr=1e-4)


def draw(output, loss):
    plt.cla()  # clear the figure
    if CUDA:
        output = output.cpu()  # move back to the CPU for plotting
    plt.scatter(x.numpy(), y.numpy(), s=0.001)
    plt.plot(x.numpy(), output.data.numpy(), 'r-', lw=5)
    plt.text(0.5, 0, 'loss=%s' % (loss.item()),
             fontdict={'size': 20, 'color': 'red'})
    plt.pause(0.005)


def train(model, criterion, optimizer, epochs):
    for epoch in range(epochs):
        output = model(inputs)
        loss = criterion(output, target)
        optimizer.zero_grad()  # zero the accumulated gradients (not the weights)
        loss.backward()
        optimizer.step()  # apply the gradient-descent update to the weights
        if epoch % 80 == 0:
            draw(output, loss)
    return model, loss


start = perf_counter()
LR_model, loss = train(LR_model, criterion, optimizer, 5000)
finish = perf_counter()
time = finish - start
print("计算时间:%s" % time)
print("final loss:", loss.item())
print("weights:", list(LR_model.parameters()))

torch.Size([100000, 1])

(training plots omitted)

Training time: 41.9481209999999
final loss: 0.1268048733472824
weights: [Parameter containing:
tensor([[0.9967]], device='cuda:0', requires_grad=True), Parameter containing:
tensor([0.6864], device='cuda:0', requires_grad=True)]
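
The learned weight is close to 1 and the bias close to 0.6, matching the generating process y = x + 1.2 * rand: the noise is uniform on [0, 1.2) with mean 0.6. Under the hood, optimizer.step() for plain SGD (no momentum, no weight decay) applies exactly the hand-written rule from the earlier sections, p <- p - lr * p.grad. A minimal sketch of the equivalence:

import torch
from torch import nn

torch.manual_seed(0)
model = nn.Linear(1, 1)
lr = 1e-4

x = torch.randn(8, 1)
loss = nn.MSELoss()(model(x), torch.zeros(8, 1))
loss.backward()

# the hand-written rule: p <- p - lr * p.grad
expected = [p.data - lr * p.grad for p in model.parameters()]

# plain SGD applies exactly that rule
torch.optim.SGD(model.parameters(), lr=lr).step()
for p, e in zip(model.parameters(), expected):
    print(torch.allclose(p.data, e))  # expect: True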