# 365天深度学习训练营 - 第P2周: 彩色图片识别心得体会

import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import torchvision
# Pick the compute device: use the GPU when CUDA is available, otherwise the CPU,
# so the rest of the script runs on the fastest hardware present.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
# Load CIFAR-10 (downloading it into ./data when missing) and convert every
# PIL image to a float tensor so it can feed straight into the network.
# The test split is only used for evaluation, never for training.
to_tensor = torchvision.transforms.ToTensor()
train_ds = torchvision.datasets.CIFAR10('data', train=True, transform=to_tensor, download=True)
test_ds = torchvision.datasets.CIFAR10('data', train=False, transform=to_tensor, download=True)
# Mini-batch size hyperparameter; change it in one place to retune the loaders.
batch_size = 32

# Wrap the datasets in DataLoaders that yield batches. The training loader
# shuffles each epoch so the model generalizes better; the test loader keeps
# a fixed order since it only measures performance.
train_dl = torch.utils.data.DataLoader(train_ds, batch_size=batch_size, shuffle=True)
test_dl = torch.utils.data.DataLoader(test_ds, batch_size=batch_size, shuffle=False)
# Grab one batch and preview its first 20 images.
imgs, labels = next(iter(train_dl))
print(imgs.shape)  # (batch, channels, height, width)

import numpy as np

plt.figure(figsize=(20, 10))
# BUG FIX: the loop previously reused the name `imgs` for its loop variable,
# clobbering the batch — after the loop `imgs` held a single image instead of
# the full batch. Use a distinct name for the per-image variable.
for i, img in enumerate(imgs[:20]):
    # Reorder (channels, H, W) -> (H, W, channels), the layout plt.imshow expects.
    npimg = img.numpy().transpose((1, 2, 0))
    plt.subplot(2, 10, i + 1)
    plt.imshow(npimg, cmap=plt.cm.binary)
    plt.axis('off')
import torch.nn.functional as F

num_classes = 10  # CIFAR-10 has ten categories


class Model(nn.Module):
    """Small CNN for CIFAR-10: three conv+pool stages, then a two-layer classifier."""

    def __init__(self):
        super().__init__()
        # Feature extractor: three 3x3 convolutions, each followed by 2x2 max pooling.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
        self.pool1 = nn.MaxPool2d(kernel_size=2)
        self.pool2 = nn.MaxPool2d(kernel_size=2)
        self.pool3 = nn.MaxPool2d(kernel_size=2)
        # Classifier head: for 32x32 inputs the features flatten to 128 * 2 * 2 = 512.
        self.fc1 = nn.Linear(512, 256)
        self.fc2 = nn.Linear(256, num_classes)

    def forward(self, x):
        # Each stage is conv -> ReLU -> pool; shapes below assume a 32x32 input.
        x = self.pool1(F.relu(self.conv1(x)))  # (N, 64, 15, 15)
        x = self.pool2(F.relu(self.conv2(x)))  # (N, 64, 6, 6)
        x = self.pool3(F.relu(self.conv3(x)))  # (N, 128, 2, 2)
        x = torch.flatten(x, start_dim=1)      # (N, 512): merge C,H,W into one dim
        x = F.relu(self.fc1(x))                # (N, 256)
        return self.fc2(x)                     # (N, num_classes) raw logits
#flatten是将后面的三维合成一维
from torchinfo import summary

# Build the network on the chosen device and print a layer-by-layer summary.
model = Model().to(device)
summary(model)

# Multi-class classification loss and the Adam optimizer with a fixed learning rate.
loss_fn = nn.CrossEntropyLoss()
learn_rate = 1e-2
opt = torch.optim.Adam(model.parameters(), lr=learn_rate)

def train(dataloader, model, loss_fn, optimizer):
    """Run one training epoch over `dataloader`.

    Args:
        dataloader: yields (inputs, labels) batches; its dataset length is the sample count.
        model: network to optimize (gradients flow through its parameters).
        loss_fn: criterion comparing predictions to integer class labels.
        optimizer: updates the model's parameters after each batch.

    Returns:
        (epoch_accuracy, mean_batch_loss) as floats.
    """
    size = len(dataloader.dataset)  # total samples, e.g. 50000 for CIFAR-10 train
    num_batches = len(dataloader)   # iterations per epoch (ceil(size / batch_size))
    # ROBUSTNESS FIX: derive the device from the model instead of reading a
    # module-level global, so the function works wherever the model lives.
    device = next(model.parameters()).device

    train_loss, train_acc = 0, 0
    for X, y in dataloader:
        X, y = X.to(device), y.to(device)

        pred = model(X)
        loss = loss_fn(pred, y)

        # Standard step: clear stale gradients, backprop, apply the update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Accumulate correct-prediction count and per-batch loss for epoch averages.
        train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()

    train_acc /= size         # fraction of samples classified correctly
    train_loss /= num_batches # mean loss per batch
    return train_acc, train_loss
#size为50000,表示总数为50000个,num_batches为50000/32,表示每个训练过程需要经过多少次迭代
def test(dataloader,model,loss_fn):
    size=len(dataloader.dataset)
    num_batches=len(dataloader)
    test_acc,test_loss=0,0
    with torch.no_grad():
        for imgs,target in dataloader:
            imgs,target=imgs.to(device),target.to(device)
            target_pred=model(imgs)
            loss=loss_fn(target_pred,target)
            test_acc+=(target_pred.argmax(1)==target).type(torch.float).sum().item()
            test_loss+=loss.item()
        test_acc/=size
        test_loss/=num_batches
        return test_acc,test_loss

epochs = 10

# Per-epoch history, kept for the plots below.
train_acc, train_loss = [], []
test_acc, test_loss = [], []

for epoch in range(epochs):
    model.train()  # enable training-mode behavior (dropout, batch-norm updates)
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, opt)

    model.eval()   # switch to inference-mode behavior for evaluation
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    template = ('Epoch:{:2d},train_acc:{:.1f}%,train_loss:{:.3f},test_acc:{:.1f}%,test_loss:{:.3f}')
    print(template.format(epoch + 1,
                          epoch_train_acc * 100, epoch_train_loss,
                          epoch_test_acc * 100, epoch_test_loss))
print('Done')

import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings('ignore')
plt.rcParams['font.sans-serif'] = ['SimHei']  # font that can render CJK labels
plt.rcParams['axes.unicode_minus'] = False    # keep minus signs legible with that font
plt.rcParams['figure.dpi'] = 100

epochs_range = range(epochs)
print(epochs_range)

# Side-by-side accuracy and loss curves for train vs. test.
plt.figure(figsize=(20, 10))

plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')

plt.show()

 

 

评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值