pytorch 基础

1. 自定义dataset的获取

# import torch
from torch.utils.data import Dataset
from PIL import Image
import os


class MyDataset(Dataset):
    """Image-folder dataset: every file under root_path/image_dir is one
    sample, labelled with the directory name itself."""

    def __init__(self, root_path, image_dir):
        self.root_path = root_path
        self.image_dir = image_dir
        self.image_path = os.path.join(root_path, image_dir)
        # Snapshot the file listing once; sample order follows os.listdir.
        self.image_list = os.listdir(self.image_path)

    def __getitem__(self, item):
        file_name = self.image_list[item]
        full_path = os.path.join(self.image_path, file_name)
        # The directory name doubles as the class label.
        return Image.open(full_path), self.image_dir

    def __len__(self):
        return len(self.image_list)


if __name__ == "__main__":
    # One dataset per class folder; Dataset.__add__ concatenates them into a
    # single combined dataset.
    ants = MyDataset("dataset", "ants")
    bees = MyDataset("dataset", "bees")
    combined = ants + bees
    print(len(combined))
    img, lbl = combined[55]
    print(lbl)
    img.show()

2. tensorboard

1. 安装tensorboard;如果tensorboard无法使用,请再安装tensorboardX

pip install tensorboard

pip install tensorboardX

2. 将数据写入tensorboard

from torch.utils.tensorboard import SummaryWriter
import numpy as np
from PIL import Image

# Each run appends event data under "logs/"; delete the directory contents
# first if stale runs should not show up in TensorBoard.
write = SummaryWriter("logs")
image_path = "dataset/ants/8124241_36b290d372.jpg"
image_pil = Image.open(image_path)
# add_image accepts tensors or numpy arrays, not PIL images, so convert.
image_array = np.array(image_pil)

# global_step=1 marks this as step 1 of the "ants" image series.
# dataformats="HWC" because np.array(PIL) is height x width x channels;
# the writer's default layout is CHW.
write.add_image("ants", image_array, 1, dataformats="HWC")

for i in range(10):
    # add_scalar(tag, value, step): "y=x" is the chart title; 2*i is the
    # scalar value (plotted on the y axis) and i is the global step (x axis).
    write.add_scalar("y=x", 2*i, i)

write.close()

3. 使用web显示写入的数据

在终端输入:tensorboard --logdir=logs --port=6007  

port不指定默认为6006,如果多人使用服务器查看tensorboard,不指定则会冲突

输出如下,点击http://localhost:6007打开网页查看

在终端输入:tensorboard --logdir=logs --port=6007   --samples_per_plugin=images=100

这样在显示图片时最多可以显示100张,默认只能显示10张

3. 数据的transform

from PIL import Image
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter

write = SummaryWriter("logs")
image_path = "dataset/ants/8398478_50ef10c47a.jpg"
img = Image.open(image_path)
# ToTensor converts a PIL Image or numpy.ndarray (H x W x C, values 0-255)
# to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
trans_tensor = transforms.ToTensor()
tensor_img = trans_tensor(img)
write.add_image("tensor", tensor_img, 1)

# Normalize: output[channel] = (input[channel] - mean[channel]) / std[channel].
# Real means/stds should be computed from the dataset; with mean 0 and std 1
# this particular call is an identity transform (demo only).
print(tensor_img[0][0][0])
trans_norm = transforms.Normalize([0, 0, 0], [1, 1, 1])
norm_img = trans_norm(tensor_img)
print(norm_img[0][0][0])
write.add_image("norm", norm_img, 2)

# Resize expects a PIL image here; convert to a tensor afterwards so it can
# be handed to add_image.
trans_resize = transforms.Resize((512, 512))
resize_img = trans_resize(img)
resize_img = trans_tensor(resize_img)
write.add_image("resize", resize_img, 0)

# Compose chains transforms: Resize(512) (shorter side -> 512, aspect kept)
# then ToTensor. NOTE(review): this reuses the "resize" tag at step 1, so it
# lands in the same TensorBoard image series as the block above.
trans_cmp = transforms.Compose([transforms.Resize(512), transforms.ToTensor()])
cmp_img = trans_cmp(img)
write.add_image("resize", cmp_img, 1)

write.close()

4. pytorch提供的数据集的使用,例如cifar

import torch
import torchvision
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter

# Convert every CIFAR-10 sample to a CHW float tensor on load.
transform = transforms.Compose([transforms.ToTensor()])

# download=True fetches and extracts the archive into ./cifar10 on first use.
cifar10_train = torchvision.datasets.CIFAR10("./cifar10", transform=transform, train=True, download=True)
cifar10_test = torchvision.datasets.CIFAR10("./cifar10", transform=transform, train=False, download=True)

write = SummaryWriter("logs")

# Log the first ten training images, one global step each.
for i in range(10):
    image, label = cifar10_train[i]
    write.add_image("cifar10", image, i)

write.close()

5. 数据的加载DataLoader

import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter

dataset_test = torchvision.datasets.CIFAR10("./cifar10", train=False, transform=transforms.ToTensor(), download=True)

# num_workers: number of worker subprocesses used to load data; 0 means load
# in the main process only.
# drop_last=True: a final batch smaller than batch_size is discarded.
data_loader = DataLoader(dataset_test, batch_size=64, shuffle=True, num_workers=0, drop_last=True)

writer = SummaryWriter("logs")

step = 0
for data in data_loader:
    images, labels = data
    # add_images (plural) logs the whole batch as an image grid per step.
    writer.add_images("cifar10_drop_T", images, step)
    step = step + 1

writer.close()

6. 网络模型的框架

import torch
from torch import nn

class MyModel(nn.Module):
    """Smallest possible nn.Module: forward() returns its input plus one."""

    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, input):
        # Calling model(x) dispatches here via nn.Module.__call__.
        result = input + 1
        return result

# Instantiate and run the model once; printing shows tensor(2.).
net = MyModel()
x = torch.tensor(1.0)
print(net(x))

7. 卷积初体验

import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

# CIFAR-10 test split as tensors, batched in 64s; drop_last avoids a smaller
# trailing batch.
dataset_test = torchvision.datasets.CIFAR10("./cifar10", train=False, transform=transforms.ToTensor(), download=True)

data_loader = DataLoader(dataset_test, batch_size=64, drop_last=True)

class MyModel(nn.Module):
    """Single conv layer demo: 3-channel input -> 6-channel output."""

    def __init__(self):
        super(MyModel, self).__init__()
        # 3x3 kernel, stride 1, no padding: a 32x32 image shrinks to 30x30.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0)

    def forward(self, input):
        out = self.conv1(input)
        return out

model = MyModel()

writer = SummaryWriter("logs")

step = 0
for data in data_loader:
    images, labels = data
    writer.add_images("input_img", images, step)
    out_images = model(images)
    # add_images can only render 3-channel images, but the conv outputs 6
    # channels; reshaping to (-1, 3, 30, 30) splits each sample into two
    # 3-channel pseudo-images purely for visualisation.
    out_images = out_images.reshape((-1, 3, 30, 30))
    print(out_images.shape)
    writer.add_images("output_img", out_images, step)
    step = step + 1

writer.close()

8. sequential的使用

import torch
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

# BUG FIX: transforms.ToTensor must be *instantiated*. Passing the class
# itself made the dataset call ToTensor(img), which raises a TypeError the
# first time a sample is fetched.
train_data = torchvision.datasets.CIFAR10("cifar10", train=False, transform=transforms.ToTensor(), download=True)

train_load = DataLoader(train_data, batch_size=64)

class MyModel(nn.Module):
    """Conv -> Flatten -> Linear pipeline wrapped in one nn.Sequential."""

    def __init__(self):
        super(MyModel, self).__init__()
        # 3x32x32 -> Conv(3->64, k=3, s=1) -> 64x30x30 -> Flatten -> 57600 -> 10
        self.seq1 = Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1),
            nn.Flatten(),
            nn.Linear(57600, 10),
        )

    def forward(self, x):
        return self.seq1(x)


writer = SummaryWriter("logs")
mymodel = MyModel()
print(mymodel)
# Dummy all-ones batch (N=64, C=3, H=W=32) used to trace the network.
input = torch.ones((64, 3, 32, 32))

print(input.shape)
out_data = mymodel(input)
print(out_data.shape)
# add_graph records the model structure for TensorBoard's Graphs tab.
writer.add_graph(mymodel, input)

writer.close()

9. loss函数的计算

import torch
from torch.nn import L1Loss

# Element-wise absolute-error demo between a prediction and a target.
in_data = torch.tensor([[2, 2], [3, 3]], dtype=torch.float32)

target = torch.tensor([[3, 3], [3, 4]], dtype=torch.float32)

# reduction="mean" (the default): sum of |in - target| divided by the number
# of elements -> (1 + 1 + 0 + 1) / 4 = 0.75 here.
# reduction="sum" would return the plain sum instead of the average.
loss = L1Loss(reduction="mean")
output = loss(in_data, target)
print(output.shape)  # reductions produce a 0-dim tensor
print(output)

10. 完整的训练过程

import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import time
import os

# ToTensor converts PIL images to CHW float tensors in [0, 1].
data_transform = torchvision.transforms.ToTensor()
train_data = torchvision.datasets.CIFAR10("./cifar10", train=True, transform=data_transform, download=True)
test_data = torchvision.datasets.CIFAR10("./cifar10", train=False, transform=data_transform, download=True)

# drop_last=True discards a final partial batch so every batch is exactly 64.
train_dataloader = DataLoader(train_data, batch_size=64, shuffle=True, drop_last=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True, drop_last=True)

# Checkpoint directory for the per-epoch saves done in the training loop.
if not os.path.exists("./weights"):
    os.mkdir("./weights")

# Train on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Device using: {}".format(device))  # fixed typo: "useing" -> "using"

class CifarModel(nn.Module):
    """CIFAR-10 classifier: three conv/pool stages plus a linear head.

    Input:  (N, 3, 32, 32) image batch.
    Output: (N, 10) class scores.
    """

    def __init__(self):
        super(CifarModel, self).__init__()
        layers = [
            nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2),    # 3x32x32  -> 32x32x32
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),        # 32x32x32 -> 32x16x16
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),   # 32x16x16 -> 64x16x16
            nn.ReLU(),
            nn.AvgPool2d(kernel_size=3, stride=2, padding=1),        # 64x16x16 -> 64x8x8
            nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2),  # 64x8x8   -> 128x8x8
            nn.ReLU(),
            nn.AvgPool2d(kernel_size=3, stride=2, padding=1),        # 128x8x8  -> 128x4x4
            nn.Flatten(),                                            # 128x4x4  -> 2048
            nn.Linear(2048, 10),
        ]
        # Registered under the same name ("model") so state_dict keys stay
        # "model.N.weight" / "model.N.bias".
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)

cifar_model = CifarModel()
# Move the model's parameters to the selected device.
cifar_model.to(device)
# print(cifar_model)

# Forward-shape sanity check with a dummy batch:
# in_data = torch.ones((64, 3, 32, 32))
# print(in_data.shape)
# out_data = cifar_model(in_data)
# print(out_data.shape)

# Loss function and optimizer: cross-entropy over 10 classes, plain SGD.
loss_fn = nn.CrossEntropyLoss()
loss_fn.to(device)
optimizer = torch.optim.SGD(cifar_model.parameters(), lr=0.01)

writer = SummaryWriter("./logs")
# Number of epochs to run, plus global step counters for TensorBoard curves.
train_epoch = 1
train_total_step = 0
test_total_step = 0

for i in range(train_epoch):
    print("-----------start epoch {} train -------------".format(i+1))
    # Wall-clock timing for one full epoch (train + eval).
    start_time = time.time()

    # ---- training phase ----
    cifar_model.train()
    for imgs, targets in train_dataloader:
        # Forward pass on the selected device.
        imgs = imgs.to(device)
        targets = targets.to(device)
        outputs = cifar_model(imgs)
        loss = loss_fn(outputs, targets)

        # Backward pass: clear stale gradients, backprop, update weights.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_total_step = train_total_step + 1
        if train_total_step % 100 == 0:
            print("train step:{}, loss:{}".format(train_total_step, loss.item()))
            writer.add_scalar("train loss", loss.item(), train_total_step)

    # ---- evaluation phase ----
    cifar_model.eval()
    test_total_loss = 0
    test_total_accuracy = 0
    test_total_num = 0
    with torch.no_grad():
        for imgs, targets in test_dataloader:
            imgs = imgs.to(device)
            targets = targets.to(device)
            outputs = cifar_model(imgs)
            loss = loss_fn(outputs, targets)

            # Count correct predictions; .item() keeps the running totals as
            # plain Python numbers instead of 0-dim tensors.
            accuracy = (torch.argmax(outputs, 1) == targets).sum().item()
            test_total_loss = test_total_loss + loss.item()
            test_total_accuracy = test_total_accuracy + accuracy
            test_total_num = test_total_num + len(targets)

    test_total_step = test_total_step + 1
    print("test accuracy:{}; test loss:{};".format(test_total_accuracy/test_total_num, test_total_loss))
    # BUG FIX: both scalars were logged under the same malformed tag
    # "test accuracy:{}", so the loss curve overwrote the accuracy curve.
    writer.add_scalar("test accuracy", test_total_accuracy/test_total_num, test_total_step)
    writer.add_scalar("test loss", test_total_loss, test_total_step)

    # Persist the whole model (structure + weights) after every epoch.
    torch.save(cifar_model, "./weights/cifar_{}.pth".format(test_total_step))

    # Report how long this epoch took.
    end_time = time.time()
    print("cost time:{}".format(end_time - start_time))
writer.close()

11. 完整的测试过程

import torch
import torchvision
from PIL import Image
from torch import nn


class CifarModel(nn.Module):
    """Same CIFAR-10 architecture as the training script; redeclared here so
    torch.load can unpickle the saved model object.

    Maps a (N, 3, 32, 32) batch to (N, 10) class scores.
    """

    def __init__(self):
        super(CifarModel, self).__init__()
        # Flat Sequential registered as "model" so checkpoint keys
        # ("model.0.weight", ...) line up with the saved weights.
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2),    # -> 32x32x32
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),        # -> 32x16x16
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),   # -> 64x16x16
            nn.ReLU(),
            nn.AvgPool2d(kernel_size=3, stride=2, padding=1),        # -> 64x8x8
            nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2),  # -> 128x8x8
            nn.ReLU(),
            nn.AvgPool2d(kernel_size=3, stride=2, padding=1),        # -> 128x4x4
            nn.Flatten(),                                            # -> 2048
            nn.Linear(2048, 10))

    def forward(self, x):
        scores = self.model(x)
        return scores


image_path = "dataset/feiji.jpg"
image = Image.open(image_path)
# Force 3 channels (drops alpha / expands grayscale) to match the model input.
image = image.convert("RGB")

transform = torchvision.transforms.Compose([torchvision.transforms.Resize((32, 32)),
                                            torchvision.transforms.ToTensor()])

image = transform(image)
print(image.shape)

# Loads a fully pickled model (structure + weights); map_location forces CPU.
# NOTE(review): torch.load unpickles arbitrary code -- only load trusted files.
cifar_test = torch.load("./cifar_87.pth", map_location=torch.device("cpu"))

# Add the batch dimension: (3, 32, 32) -> (1, 3, 32, 32).
image = torch.reshape(image, (1, 3, 32, 32))

# eval() + no_grad(): inference mode, no gradient tracking.
cifar_test.eval()
with torch.no_grad():
    output = cifar_test(image)

print(output)
# Index of the highest score = predicted class id.
print(torch.argmax(output, 1))

12. 网络的修改

from torch import nn


class CifarModel(nn.Module):
    """CIFAR-10 classifier (conv/pool stack + linear head), used here to
    demonstrate adding, replacing, and freezing submodules."""

    def __init__(self):
        super(CifarModel, self).__init__()
        self.seq_model = nn.Sequential(
            # in:3*32*32    out:32*32*32
            nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            # in:32*32*32    out:32*16*16
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            # in:32*16*16    out:64*16*16
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            # in:64*16*16    out:64*8*8
            nn.AvgPool2d(kernel_size=3, stride=2, padding=1),
            # in:64*8*8    out:128*8*8
            nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            # in:128*8*8    out:128*4*4
            nn.AvgPool2d(kernel_size=3, stride=2, padding=1),
            # in:128*4*4    out:2048
            nn.Flatten(),
            nn.Linear(2048, 10)
        )

    def forward(self, x):
        x = self.seq_model(x)
        return x

cifar = CifarModel()

# Register an extra layer at the end of the module under the name "add_linear".
cifar.add_module("add_linear", nn.Linear(10, 100))

# Assigning an nn.Module attribute replaces the registered submodule.
cifar.add_linear = nn.Linear(10, 200)
for name, value in cifar.named_parameters():
    print("name:{}; value grade:{}".format(name, value.requires_grad))
    # BUG FIX: named_parameters() yields qualified names such as
    # "add_linear.weight" / "add_linear.bias", so the original exact test
    # `name == "add_linear"` never matched and froze nothing.
    if name.startswith("add_linear"):
        value.requires_grad = False

# To hand only the still-trainable parameters to an optimizer, filter e.g.:
# trainable = [p for p in cifar.parameters() if p.requires_grad]


13. 网络的参数

in:

cifar_model = CifarModel()
print(cifar_model)

 out:

CifarModel(
  (model): Sequential(
    (0): Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): ReLU()
    (2): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
    (3): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (4): ReLU()
    (5): AvgPool2d(kernel_size=3, stride=2, padding=1)
    (6): Conv2d(64, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (7): ReLU()
    (8): AvgPool2d(kernel_size=3, stride=2, padding=1)
    (9): Flatten(start_dim=1, end_dim=-1)
    (10): Linear(in_features=2048, out_features=10, bias=True)
  )
)

 查看模型的参数

for name, value in cifar_model.named_parameters():
    print("name:{}; value grade:{}".format(name, value.requires_grad))

 

模型的两种保存方式

    # Save the whole model (structure + weights) in one file.
    torch.save(cifar_model, "./weights/cifar_{}.pth".format(test_total_step))
    # Save only the weights (a state dict).
    torch.save(cifar_model.state_dict(), "./weights/cifar_dict_{}.pth".format(test_total_step))

模型的权重的两种加载方式

# The file stores the full model object, so loading returns a usable model.
cifar_test = torch.load("./weights/cifar_1.pth")

# The file stores only weights; torch.load returns an ordered dict of tensors.
cifar_weight = torch.load("./weights/cifar_dict_1.pth")
# To tweak parameters, build a new ordered dict with the desired entries and
# feed it to a freshly constructed network.
cifar_test = CifarModel()
cifar_test.load_state_dict(cifar_weight)

# Build a filtered state dict: freeze everything except the classifier
# parameters before handing the dict to a model.
from collections import OrderedDict
target_state = OrderedDict()
# BUG FIX: iterate the *state dict* (cifar_weight); the original looped over
# cifar_test, which is a model and has no .items(). The original if-body was
# also unindented (a SyntaxError); presumably both lines belong under the
# condition -- confirm against the intended workflow.
for k, v in cifar_weight.items():
    print(k)
    if k != 'classifier.weight' and k != 'classifier.bias':
        v.requires_grad = False
        target_state[k] = v

顺序字典的访问

in:
for k in cifar_test2:
    # Iterating a state dict (OrderedDict) yields its keys.
    # BUG FIX: the original printed the undefined name `data`; the sample
    # output below (key names) shows `print(k)` was intended.
    print(k)

out:
model.0.weight
model.0.bias
model.3.weight
model.3.bias
model.6.weight
model.6.bias
model.10.weight
model.10.bias

in:
# .items() yields (key, tensor) pairs from the state dict.
for k,v in cifar_test2.items():
    print(k)
    print(v)

out:
model.0.weight
tensor([[[[-0.1044,  0.0435, -0.0030, -0.1005,  0.0612],
          [-0.0222,  0.0845, -0.0075,  0.0084, -0.0196],
          [ 0.0661, -0.1093,  0.0391, -0.1094,  0.0351],
          [ 0.0936,  0.0831,  0.0499, -0.0364,  0.0417],
          [-0.0943, -0.1099,  0.0055,  0.0313, -0.0098]],

         [[-0.0284, -0.0818, -0.0304, -0.1030, -0.0512],
          [ 0.0463,  0.0858,  0.0560,  0.0147, -0.0257],
          [ 0.0028,  0.0647,  0.0102, -0.0844,  0.0187],
          [-0.1099, -0.0358, -0.1116, -0.1080, -0.0171],
          [-0.0461, -0.0222, -0.0421, -0.0376, -0.0739]],

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值