PyTorch 学习笔记

Learning PyTorch

河北工业大学人工智能与数据科学学院教师 线上课程学习笔记


Pytorch 建模流程

  1. prepare data
    • dataset dataloader
  2. design model using class
    • inherit from nn.Module
  3. construct loss and optimizer
    • using Pytorch API
  4. training cycle + test
    • forward backward update

gradient descent

# Batch gradient descent: fit y = w * x to three points (true w = 2).
# prepare data
x_data = [1, 2, 3]
y_data = [2, 4, 6]
# initialize the weight
w = 1

# forward pass: linear model without bias
def forward(x):
    return x * w

# cost: mean squared error over the whole dataset
def cost(xs, ys):
    total = 0
    for x, y in zip(xs, ys):
        # prediction from the forward pass
        y_pred = forward(x)
        # squared-error term of the MSE
        total += (y - y_pred) ** 2
    return total / len(xs)

# gradient of the MSE cost w.r.t. w, averaged over the dataset
def gradient(xs, ys):
    grad = 0
    for x, y in zip(xs, ys):
        # d/dw (x*w - y)^2 = 2*x*(x*w - y)
        # BUG FIX: accumulate with '+=' — the original '=' discarded all
        # but the last sample, so dividing by len(xs) below was wrong.
        grad += 2 * x * (x * w - y)
    return grad / len(xs)  # the 1/N of the mean was factored out; applied here

# training cycle
print(f'训练前, 4的结果{forward(4)}')
for epoch in range(101):
    cost_val = cost(x_data, y_data)
    grad_val = gradient(x_data, y_data)
    w -= 0.01 * grad_val
    if epoch % 10 == 0:
        print(f'epoch: {epoch} w={w:.4f} loss={cost_val:.4f}')
print(f'训练后, 4的结果{forward(4)}')

BP

import torch

# Dataset: y = 2x, three training points.
x_data = [1, 2, 3]
y_data = [2, 4, 6]

# Learnable weight as a 1-element tensor tracked by autograd.
w = torch.tensor([1.0], requires_grad=True)


def forward(x):
    """Linear model without bias: y_hat = w * x."""
    return x * w


def loss(x, y):
    """Squared error for a single sample (builds the autograd graph)."""
    return (forward(x) - y) ** 2


print('predict before training', 4, forward(4).item())
for epoch in range(100):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)  # build the computation graph
        l.backward()    # populate w.grad
        print('\tgrad:', x, y, w.grad.item())
        # Update through .data so the SGD step itself is not recorded
        # into a new computation graph (tensor arithmetic would be).
        w.data -= 0.01 * w.grad.data
        w.grad.data.zero_()

    print("progress:", epoch, l.item())
print('predict after training', 4, forward(4).item())

linear

import torch

# 1. prepare data
# For mini-batch training, X and Y must be 3x1 tensors.
# NOTE: they must be float — otherwise the matmul fails with
# "RuntimeError: mat1 and mat2 must have the same dtype".
x_data = torch.tensor([[1.0], [2.0], [3.0]])
y_data = torch.tensor([[2.0], [4.0], [6.0]])
print(x_data, y_data)

# 2. Design model using Class
# 构造计算图
class LinearModel(torch.nn.Module):
    """One fully connected layer: y = x W^T + b."""

    def __init__(self, in_feature, out_feature):
        super().__init__()  # run the nn.Module constructor
        self.linear = torch.nn.Linear(in_feature, out_feature)

    def forward(self, x):
        # Apply the linear layer to a (N, in_feature) batch.
        return self.linear(x)


model = LinearModel(1, 1)
print(model)

# 3. Construct loss and optimizer using the PyTorch API.
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)


# 4. Training Cycle: forward -> backward -> update, 1000 epochs.
for epoch in range(1000):
    y_pred = model(x_data)            # forward: compute y_hat
    loss = criterion(y_pred, y_data)
    print(epoch, loss)

    optimizer.zero_grad()             # clear accumulated gradients
    loss.backward()                   # backward pass
    optimizer.step()                  # parameter update

# output weight and bias (should approach w=2, b=0)
print(f'w={model.linear.weight.item()}')
print(f'b={model.linear.bias.item()}')

# test model on a single 1x1 input
x_test = torch.tensor([[4.0]])
y_test = model(x_test)
print(f'y_pred:{y_test.item():.2f}')  # .item(): plain Python scalar
print(f'y_pred:{y_test.data}')        # .data: 1x1 tensor, same value

LogisticRegression

import torch
import torchvision
import torch.nn.functional as F
import torch


# 1. prepare data: study hours vs. pass(1)/fail(0) labels, both 3x1 floats.
x_data = torch.tensor([[1.0], [2.0], [3.0]])
y_data = torch.tensor([[0.0], [0.0], [1.0]])

# 2. model
class LogisticRegressionModel(torch.nn.Module):
    """Logistic regression: one linear unit squashed by a sigmoid."""

    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # P(y=1 | x) for a (N, 1) float batch, strictly in (0, 1).
        return torch.sigmoid(self.linear(x))

model = LogisticRegressionModel()

# 3. loss and optimizer
# Binary classification -> binary cross-entropy loss.
criterion = torch.nn.BCELoss()  # BinaryCrossEntropy
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# 4. training: 1000 epochs of forward -> backward -> update.
for epoch in range(1000):
    optimizer.zero_grad()              # clear stale gradients first
    y_pred = model(x_data)             # forward
    loss = criterion(y_pred, y_data)
    print(epoch, loss)
    loss.backward()                    # backward
    optimizer.step()                   # update

# 5. test: plot P(pass) over 0..10 study hours.
import numpy as np
import matplotlib.pyplot as plt

hours = np.linspace(0, 10, 200)
hours_t = torch.unsqueeze(torch.Tensor(hours), dim=1)
prob_t = model(hours_t)
prob = prob_t.data.numpy()
plt.plot(hours, prob)
plt.plot([0, 10], [0.5, 0.5], c='r')  # horizontal decision threshold at p = 0.5
plt.xlabel('hour')
plt.ylabel('Probability of pass')
plt.grid()
plt.show()

multi features

import torch


# 8个特征
# class Model(torch.nn.Module):
#     def __init__(self):
#         super(Model, self).__init__()
#         self.linear = torch.nn.Linear(8, 1)
#         self.sigmoid = torch.sigmoid()
#
#     def forward(self, x):
#         x = self.sigmoid(self.linear(x))
#         return x
#
# model = Model(...)

import numpy as np
# 1. prepare data
# Load the diabetes CSV: header row skipped, all values parsed as float32.
# NOTE(review): hard-coded absolute Windows path — adjust per machine.
xy = np.loadtxt(r'F:\ERIC_notebook\MachineLearning\learning_pytorch\dataset\diabetes.csv',
                skiprows=1, delimiter=',', dtype=np.float32)
# All columns except the last are the features.
x_data = torch.from_numpy(xy[:, :-1])
# Indexing with [-1] (a list) keeps the label as an (N, 1) column,
# matching the shape BCELoss expects.
y_data = torch.from_numpy(xy[:, [-1]])
print(x_data.shape, y_data.shape)

import torch
# 2. model
class Model(torch.nn.Module):
    """Fully connected 8 -> 6 -> 4 -> 1 binary classifier, sigmoid throughout."""

    def __init__(self):
        super().__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.activate = torch.nn.Sigmoid()

    def forward(self, x):
        # Squash after every layer. The FINAL activation must be sigmoid:
        # with relu an exact-0 output would make BCELoss compute ln(0)
        # and error out, so sigmoid keeps the output strictly in (0, 1).
        for layer in (self.linear1, self.linear2, self.linear3):
            x = self.activate(layer(x))
        return x


model = Model()

# 3. loss and optimizer
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# 4. training: full-batch gradient descent, 100 epochs of
#    forward -> backward -> update.
for epoch in range(100):
    optimizer.zero_grad()                # clear stale gradients first
    y_pred = model(x_data)               # forward
    loss = criterion(y_pred, y_data)
    print(epoch, loss)
    loss.backward()                      # backward
    optimizer.step()                     # update

dataset & dataloader


import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import numpy


# Dataset 是抽象类, 只能被继承, 不能实例化, 需要实现下面三个魔术方法
# DataLoader 是可以实例化的
class DiabetesDataset(Dataset):
    """Map-style Dataset over the diabetes CSV.

    Dataset is abstract — it can only be subclassed, never instantiated
    directly — and requires __getitem__ / __len__ (DataLoader, by
    contrast, is instantiated directly).
    """

    def __init__(self, filepath):
        raw = np.loadtxt(filepath, skiprows=1, delimiter=',', dtype=np.float32)
        self.len = raw.shape[0]
        # Last column is the label; [-1] keeps it as an (N, 1) column.
        self.x_data = torch.from_numpy(raw[:, :-1])
        self.y_data = torch.from_numpy(raw[:, [-1]])

    def __getitem__(self, index):
        # One (features, label) sample pair.
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len


# Build the dataset and a shuffling mini-batch loader.
# NOTE(review): hard-coded absolute Windows path — adjust per machine.
dataset = DiabetesDataset(r'F:\ERIC_notebook\MachineLearning\learning_pytorch\dataset\diabetes.csv')
train_loader = DataLoader(dataset=dataset, batch_size=32,
                          shuffle=True,
                          num_workers=2  # worker subprocess count; on Windows this needs a __main__ guard or it may error
                          )

class Model(torch.nn.Module):
    """Three-layer (8 -> 6 -> 4 -> 1) sigmoid-activated binary classifier."""

    def __init__(self):
        super().__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        # Squash after each layer; the final sigmoid yields a probability
        # in (0, 1), which is what BCELoss expects.
        out = x
        for layer in (self.linear1, self.linear2, self.linear3):
            out = self.sigmoid(layer(out))
        return out


model = Model()

criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)


# The DataLoader above uses num_workers > 0 (subprocess workers), so the
# training loop must sit under a __main__ guard or worker spawn fails.
if __name__ == '__main__':
    for epoch in range(100):
        for batch_idx, batch in enumerate(train_loader):
            # 1. prepare data
            inputs, labels = batch
            # 2. forward
            y_pred = model(inputs)
            loss = criterion(y_pred, labels)
            print(epoch, batch_idx, loss.item())
            # 3. backward
            optimizer.zero_grad()
            loss.backward()
            # 4. update
            optimizer.step()

Softmax 实现MNIST

# ---Cross Entropy In Numpy
# import numpy as np
#
#
# y = np.array([1, 0, 0])
# z = np.array([0.2, 0.1, -0.1])
#
# y_pred = np.exp(z) / np.exp(z).sum()
# loss = -(y * np.log(y_pred)).sum()
# print(loss)
# print(-np.log(0.38))

# ------
# import torch
# # y = torch.LongTensor([0])
# y = torch.tensor([0])
# z = torch.tensor([[0.2, 0.1, 0.1]])
# criterion = torch.nn.CrossEntropyLoss()
# loss = criterion(z, y)
# print(loss.item())

# -----
# import torch
# criterion = torch.nn.CrossEntropyLoss()
# Y = torch.LongTensor([2, 0, 1])
# Y_pred1 = torch.Tensor([[0.1, 0.2, 0.9],
#                         [1.1, 0.1, 0.2],
#                         [0.2, 2.1, 0.1]])
# Y_pred2 = torch.Tensor([[0.8, 0.2, 0.3],
#                         [0.2, 0.3, 0.5],
#                         [0.2, 0.2, 0.5]])
# l1 = criterion(Y_pred1, Y)
# l2 = criterion(Y_pred2, Y)
# # 第一个预测, 与实际标签比较接近, 第二个预测不相似
# print('batch loss1', l1.item(), '\nbatch loss2', l2.item())


# ------
# MNIST
import torch
from torchvision import transforms  # 针对图像进行处理的工具
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F  # to use relu
import torch.optim as optim

# 1. prepare data
batch_size = 64
# Transform pipeline applied to every MNIST image.
transform = transforms.Compose([
    # convert the PIL image to Tensor, PIL image 28X28 -> PytorchTensor 1X28X28
    transforms.ToTensor(),
    # standardize with the MNIST training-set mean / std
    transforms.Normalize(mean=(0.1307,), std=(0.3081,))]
)
# download=True fetches MNIST into ../dataset/mnist on first run (network I/O).
train_dataset = datasets.MNIST(root='../dataset/mnist', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist', train=False, download=True, transform=transform)
# no shuffling for evaluation
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)


# 2. model
class Net(torch.nn.Module):
    """Five-layer MLP for MNIST: 784 -> 512 -> 256 -> 128 -> 64 -> 10."""

    def __init__(self):
        super().__init__()
        self.l1 = torch.nn.Linear(784, 512)
        self.l2 = torch.nn.Linear(512, 256)
        self.l3 = torch.nn.Linear(256, 128)
        self.l4 = torch.nn.Linear(128, 64)
        self.l5 = torch.nn.Linear(64, 10)

    def forward(self, x):
        # Flatten each 1x28x28 image into a 784-vector: (N, 784).
        out = x.view(-1, 784)
        for hidden in (self.l1, self.l2, self.l3, self.l4):
            out = F.relu(hidden(out))
        # Raw logits — CrossEntropyLoss applies log-softmax itself.
        return self.l5(out)


model = Net()

# 3. loss and optimizer
# CrossEntropyLoss = log-softmax + NLL, so the net outputs raw logits.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)


# 4. Wrap one pass over the data in a function, replacing the inline
#    training cycle.
def run_train(epoch):
    """Run one training epoch; log the average loss every 300 mini-batches."""
    running_loss = 0
    for batch_idx, batch in enumerate(train_loader):
        inputs, target = batch
        # reset accumulated gradients
        optimizer.zero_grad()

        # forward -> backward -> update
        logits = model(inputs)
        loss = criterion(logits, target)
        loss.backward()
        optimizer.step()

        # .item() detaches the scalar so no computation graph is kept alive
        running_loss += loss.item()
        if batch_idx % 300 == 299:  # report once every 300 batches
            print(f'[{epoch + 1}, {batch_idx + 1}] loss:{running_loss / 300:.3f}')
            running_loss = 0.0


def run_test():
    """Evaluate on the test set and print the overall accuracy (percent)."""
    correct = 0  # correctly classified samples
    total = 0    # samples seen
    with torch.no_grad():  # no gradients needed for evaluation
        for images, labels in test_loader:
            logits = model(images)
            # argmax along dim=1: the class with the largest logit wins;
            # torch.max returns (values, indices)
            _, predicted = torch.max(logits.data, dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
        print(f'Accuracy on test set: {100 * correct / total}')


if __name__ == '__main__':
    # Alternate ten rounds of training and evaluation.
    for epoch in range(10):
        run_train(epoch)
        run_test()

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值