pytorch入门(二)

 

线性连接、拍平

Sequential

# 旧版(没用sequential)
class Net_10(nn.Module):
    """CIFAR-10 CNN written layer-by-layer (pre-Sequential style).

    Expects input of shape (N, 3, 32, 32) and returns class scores of
    shape (N, 10), printing the tensor shape after every layer.
    """

    def __init__(self):
        super(Net_10, self).__init__()
        # Three conv+pool stages: 3x32x32 -> 32x16x16 -> 32x8x8 -> 64x4x4.
        self.conv1 = Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2)
        self.pool1 = MaxPool2d(2)
        self.conv2 = Conv2d(in_channels=32, out_channels=32, kernel_size=5, padding=2)
        self.pool2 = MaxPool2d(2)
        self.conv3 = Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2)
        self.pool3 = MaxPool2d(2)
        # Classifier head: 64*4*4 = 1024 features -> 128 -> 10 classes.
        self.flatten1 = Flatten()
        self.linear1 = Linear(1024, 128)
        self.linear2 = Linear(128, 10)

    def forward(self, x):
        """Run all layers in order, tracing the intermediate shapes."""
        for layer in (self.conv1, self.pool1,
                      self.conv2, self.pool2,
                      self.conv3, self.pool3,
                      self.flatten1, self.linear1):
            x = layer(x)
            print("-", x.shape)
        x = self.linear2(x)
        print("网络输出结果:", x.shape)
        return x

# sequential版本
class Net_10_seq(nn.Module):
    """Same architecture as a layer-by-layer CIFAR-10 CNN, but packaged
    inside a single nn.Sequential container."""

    def __init__(self):
        super(Net_10_seq, self).__init__()

        # Build the pipeline as a plain list, then hand it to Sequential.
        layers = [
            Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2),
            MaxPool2d(2),
            Conv2d(in_channels=32, out_channels=32, kernel_size=5, padding=2),
            MaxPool2d(2),
            Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 128),
            Linear(128, 10),
        ]
        self.model = Sequential(*layers)

    def forward(self, x):
        """Forward pass; prints the output shape before returning it."""
        out = self.model(x)
        print("网络输出结果:", out.shape)
        return out




# Instantiate both network variants; only the Sequential one is exercised below.
n = Net_10()
nseq = Net_10_seq()
print(nseq)
# Test -----
testdata = torch.ones([64, 3, 32, 32])  # dummy batch: 64 CIFAR-10-sized images (3x32x32)
output = nseq(testdata)
print(output.shape)


'''
Net_10_seq(
  (model): Sequential(
    (0): Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (2): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (4): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (6): Flatten()
    (7): Linear(in_features=1024, out_features=128, bias=True)
    (8): Linear(in_features=128, out_features=10, bias=True)
  )
)
网络输出结果: torch.Size([64, 10])
torch.Size([64, 10])
'''


现有模型:

网络流图

# Trace the model graph into TensorBoard.
testdata = torch.ones([64, 3, 32, 32])  # dummy batch of CIFAR-10-sized inputs
output = nseq(testdata)
print(output.shape)
writers = SummaryWriter("logs8")
writers.add_graph(nseq, testdata)  # add_graph(model instance, example input)
writers.close()  # must close (or flush) before the graph shows up in TensorBoard

 1. add_graph(网络实例, 数据) 

2. 需要close才看得到

L1、MSE损失与交叉熵

L1loss: ∑|X-Y|/n

MSEloss: ∑(X-Y)^2/n

CrossEntropyLoss: -x[class] + ln(∑ exp(x_i)),例如三类得分为(0.1, 0.8, 0.2)、目标类为第2类时:-0.8+ln(exp(0.1)+exp(0.8)+exp(0.2)):  用于分类模型

# L1 loss: mean absolute error between prediction and target.
loss1 = L1Loss()
X1 = torch.tensor([1,2,3], dtype=torch.float)
Y1 = torch.tensor([1,2,5],dtype=torch.float)
print(loss1(X1,Y1))  # (|1-1| + |2-2| + |3-5|) / 3 -> 0.6667

# MSE loss: mean squared error.
loss2 = MSELoss()
print(loss2(X1,Y1))  # (0 + 0 + (3-5)^2) / 3 -> 1.3333


# Cross-entropy loss for classification: expects raw scores (N, C) and class indices (N,).
loss3 = nn.CrossEntropyLoss()
X = torch.tensor([0.1, 0.8, 0.2])  # raw (unnormalized) scores for 3 classes
Y = torch.tensor([1],dtype=torch.long)  # target class index
X = torch.reshape(X, (1, 3))  # add the batch dimension: (1, 3)
print(X.shape, Y.shape)  # torch.Size([1, 3]) torch.Size([1])
print(loss3(X,Y))  # = -X[0,1] + ln(exp(0.1)+exp(0.8)+exp(0.2))

反向传播与优化器

optim = torch.optim.SGD(nseq.parameters(),lr=0.01)   # define the optimizer
for epoch in range(20):
    running_loss = 0.0
    # NOTE(review): `dataloader` is defined elsewhere — presumably a CIFAR-10 DataLoader.
    for data in dataloader:
        imgs, targets = data
        out = nseq(imgs)
        loss_ = loss3(out, targets)

        optim.zero_grad()  # clear gradients left over from the previous step
        loss_.backward()  # backpropagation
        optim.step()  # gradient-descent update

        # accumulate the epoch loss
        running_loss = running_loss+loss_
    print("epoch:",epoch," loss:",running_loss)
计算梯度:loss(out, targets)
优化器:利用梯度对参数进行修改
    1. 创建优化器时:params, lr
    2. optimizer.zero_grad()
    3. loss.backward()
    4. optimizer.step(closure)

现有模型torchvision.models

import torch
import torchvision.models
from  torchvision import models

# 1. Modify an existing network
# 2. Survey the torch packages and how to use them

# NOTE(review): `pretrained=` is the legacy torchvision API; newer versions use `weights=`.
vgg16_false = torchvision.models.vgg16(pretrained=False)
vgg16_true = torchvision.models.vgg16(pretrained=True)  # downloads ImageNet weights

print(vgg16_true)
# Append an extra 1000->10 linear layer to the classifier head (e.g. for CIFAR-10).
vgg16_true.classifier.add_module("add_linear", torch.nn.Linear(1000,10))
print("--------------")
print(vgg16_true)


'''运行片段
    (1): ReLU(inplace=True)
    (2): Dropout(p=0.5, inplace=False)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace=True)
    (5): Dropout(p=0.5, inplace=False)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
    (add_linear): Linear(in_features=1000, out_features=10, bias=True)
  )
)
'''

一些细节

model.train()设置网络进入训练状态

model.eval()设置网络进入验证状态

只在Dropout、BatchNorm等一些训练与测试行为不同的网络层有作用

GPU训练cuda

import torch.cuda
import torch
import time

A = torch.ones([1,1,1,1]) # the same applies to model, loss, imgs, targets
if torch.cuda.is_available():
    A = A.cuda()   

# timing
start_time = time.time()
end_time = time.time()
print(end_time - start_time)

# GPU vs CPU is roughly a 10x difference

# Google offers a free GPU (Colab)

# Simpler alternative:
# .to(device)
# define the training device
device = torch.device("cpu")   # or "cuda", "cuda:0"
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
A = A.to(device)  # model/loss/imgs/targets; only the first two need no reassignment caveat — TODO confirm: original note says only model and loss need the returned value re-assigned
    

完整的模型验证套路 

import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from torch.utils.data import DataLoader
from torch import nn
import time
begintime = time.time()  # wall-clock reference for per-epoch timing
writer = SummaryWriter("logs_train")

# CIFAR-10: 3x32x32 images, 10 classes; downloaded to ./dataset on first run.
train_data = torchvision.datasets.CIFAR10(root="./dataset",train=True,transform=transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root="./dataset",train=False,transform=transforms.ToTensor(),
                                          download=True)
train_dataloader = DataLoader(dataset=train_data, batch_size=64, shuffle=False)
test_dataloader = DataLoader(dataset=test_data, batch_size=64, shuffle=False)


class Net(nn.Module):
    """CNN for CIFAR-10: three conv+pool stages plus a two-layer classifier head."""

    def __init__(self):
        super(Net, self).__init__()
        stages = [
            nn.Conv2d(3, 32, 5, 1, 2),   # 3x32x32 -> 32x32x32
            nn.MaxPool2d(2),             # -> 32x16x16
            nn.Conv2d(32, 32, 5, 1, 2),  # -> 32x16x16
            nn.MaxPool2d(2),             # -> 32x8x8
            nn.Conv2d(32, 64, 5, 1, 2),  # -> 64x8x8
            nn.MaxPool2d(2),             # -> 64x4x4
            nn.Flatten(),                # -> 1024
            nn.Linear(1024, 64),
            nn.Linear(64, 10),           # 10 CIFAR-10 classes
        ]
        self.model = nn.Sequential(*stages)

    def forward(self, x):
        """Return class scores of shape (N, 10) for input of shape (N, 3, 32, 32)."""
        return self.model(x)

net = Net()

# cross-entropy loss for classification
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)

for i in range(15):
    # --- training pass over the train set ---
    for data in train_dataloader:
        imgs, target = data
        out = net(imgs)
        loss = loss_fn(out, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # --- evaluation pass over the test set ---
    losses = 0
    acces = 0
    for data in test_dataloader:
        imgs, target = data
        out = net(imgs)
        loss = loss_fn(out, target)
        losses = losses + loss

        # number of correct predictions in this batch
        acc = (out.argmax(1) == target).sum()
        acces = acces+acc

    print("-----------运行时间:", time.time() - begintime)
    print("损失:", losses, "\n正确率:",torch.true_divide(acces,  len(test_data)))

    writer.add_scalar("test_loss", losses, i)
    writer.add_scalar("test_acc", acces, i)
    torch.save(net, "net10_{}.pth".format(i))  # save the whole model after each epoch

writer.close()


# Colab打不开怎么办?

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值