MobileNetV2 with PyTorch

一、记录

1、首先检查安装的pytorch是否带cuda,如果没安装,就百度搜索安装方法,因为我是笔记本电脑,比较慢,我是在网站包里下载并安装的,包的地址:https://download.pytorch.org/whl/torch_stable.html
2、tensorboard老是打不开,百度了一下原来是跑模型生成的参数文件的位置,是在当前目录下,所以用tensorboard --logdir=E:\myself\实验室\目标检测\project\DeepLizard-pytorch–master\DeepLizard-pytorch–master\runs来打开,tensorboard --logdir=your path,参数文件是在当前目录下生成,所以从你的文件目录入手,生成的参数文件大概长这样
[image placeholder — the original screenshot was not preserved in this export]
3、跑代码的时候为了运算快一点,就用了cuda,为了方便检测gpu的使用性能,window用户从这里查看
[image placeholder — the original screenshot was not preserved in this export]
但是,cuda只有2g,所以batch一旦过大,就内存溢出,所以就把batch设置得很小=10
4、刚开始训练为了保障没错,就把参数设置得很小,迭代一次,看报不报错,如果不报错,就迭代100次以上直到网络收敛。

二、待完成

1、为了计算迭代100次可能消耗的时间,我应该再计算一下迭代5次耗时多久,这样就可以估量100次耗时多久,这个需要自己找代码实现。
2、再理解理解教程简易版本的模型,摸索摸索,然后在自己的模型上迭代。
3、理解迭代的次数,生成的loss和accuracy到底是一批还是一层?
4、mobileNetV2是分类模型,所以转换数据集比较简单,本人方向是目标检测,如何用自己的数据集打标签生成所需的xml或者json或者txt文件,然后读取转换加载呢?

三、代码

这段代码是将数据集Fashion转成图片,使用一次即可

# Unpack the raw FashionMNIST files into .jpg images and generate the matching train.txt / test.txt index files
import os
from skimage import io
import torchvision.datasets.mnist as mnist

# Root directory where the raw FashionMNIST idx files were downloaded.
root="./data/FashionMNIST"
# Each split is an (images, labels) pair of tensors decoded from the raw idx files.
train_set = (
    mnist.read_image_file(os.path.join(root, 'train-images-idx3-ubyte')),
    mnist.read_label_file(os.path.join(root, 'train-labels-idx1-ubyte'))
        )
test_set = (
    mnist.read_image_file(os.path.join(root, 't10k-images-idx3-ubyte')),
    mnist.read_label_file(os.path.join(root, 't10k-labels-idx1-ubyte'))
        )
print("training set :",train_set[0].size())
print("test set :",test_set[0].size())

def convert_to_img(train=True):
    """Dump one split of FashionMNIST tensors to individual .jpg files.

    Writes ``<root>/train/<i>.jpg`` (or ``<root>/test/<i>.jpg``) for every
    sample and an index file ``<root>/train.txt`` (or ``test.txt``) with one
    "``<image-path> <label>``" line per image.

    Args:
        train: if True convert ``train_set``, otherwise ``test_set``
               (both read from module-level globals).
    """
    # Single code path for both splits -- the original duplicated the whole
    # loop for train and test.
    split = 'train' if train else 'test'
    images, labels = train_set if train else test_set
    data_path = root + '/' + split + '/'
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    # 'with' guarantees the index file is closed even if io.imsave raises
    # (the original leaked the handle on error).
    with open(root + '/' + split + '.txt', 'w') as f:
        for i, (img, label) in enumerate(zip(images, labels)):
            img_path = data_path + str(i) + '.jpg'
            io.imsave(img_path, img.numpy())
            f.write(img_path + ' ' + str(int(label)) + '\n')

# One-off conversion: run once to materialize jpg files + index txt for both splits.
convert_to_img(True)
convert_to_img(False)
#Build the MobileNetV2 network
import torch
import torch.nn as nn
import torch.nn.functional as F


class Block(nn.Module):
    """Inverted residual block: 1x1 expand -> 3x3 depthwise -> 1x1 project."""

    def __init__(self, in_planes, out_planes, expansion, stride):
        super(Block, self).__init__()
        self.stride = stride
        hidden = expansion * in_planes

        # 1x1 pointwise expansion to `hidden` channels.
        self.conv1 = nn.Conv2d(in_planes, hidden, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(hidden)
        # 3x3 depthwise conv: groups == channels, one filter per channel.
        self.conv2 = nn.Conv2d(hidden, hidden, kernel_size=3, stride=stride,
                               padding=1, groups=hidden, bias=False)
        self.bn2 = nn.BatchNorm2d(hidden)
        # 1x1 pointwise projection; no ReLU afterwards (linear bottleneck).
        self.conv3 = nn.Conv2d(hidden, out_planes, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        # Identity shortcut when shapes already match; otherwise a 1x1 conv
        # adapts the channel count (only used at stride 1).
        self.shortcut = nn.Sequential()
        if stride == 1 and in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
                          padding=0, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        # Residual connection only when the spatial size is preserved.
        if self.stride == 1:
            y = y + self.shortcut(x)
        return y


class MobileNetV2(nn.Module):
    """MobileNetV2 classifier adapted for 32x32 inputs (CIFAR-style)."""

    # Each tuple: (expansion, out_planes, num_blocks, stride)
    cfg = [(1,  16, 1, 1),
           (6,  24, 2, 1),  # NOTE: change stride 2 -> 1 for CIFAR10
           (6,  32, 3, 2),
           (6,  64, 4, 2),
           (6,  96, 3, 1),
           (6, 160, 3, 2),
           (6, 320, 1, 1)]

    def __init__(self, num_classes=10):
        super(MobileNetV2, self).__init__()
        # NOTE: stem stride kept at 1 (not 2) so small 32x32 inputs are not
        # downsampled too early.
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        # Final 1x1 conv expands 320 -> 1280 feature channels before the head.
        self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(1280)
        self.linear = nn.Linear(1280, num_classes)

    def _make_layers(self, in_planes):
        """Stack inverted-residual Blocks as described by ``cfg``."""
        blocks = []
        for expansion, out_planes, num_blocks, first_stride in self.cfg:
            # Only the first block of each stage may downsample; the rest
            # run at stride 1.
            for s in [first_stride] + [1] * (num_blocks - 1):
                blocks.append(Block(in_planes, out_planes, expansion, s))
                in_planes = out_planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layers(h)
        h = F.relu(self.bn2(self.conv2(h)))
        # NOTE: 4x4 pooling (not 7x7) collapses the remaining grid for
        # 32x32 inputs.
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
#Load the dataset
# coding: utf-8
import torch
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from PIL import Image

root = "./data/FashionMNIST"

# -----------------ready the dataset--------------------------
def default_loader(path):
    """Open the image at *path* and force a 3-channel RGB conversion."""
    img = Image.open(path)
    return img.convert('RGB')
class MyDataset(Dataset):
    """Dataset backed by an index file of "<image-path> <label>" lines.

    Args:
        txt: path to the index file produced by the conversion step.
        transform: optional callable applied to the loaded image.
        target_transform: optional callable for labels (stored for API
            compatibility; not applied in __getitem__, matching the original).
        loader: callable mapping a file path to an image.
    """
    def __init__(self, txt, transform=None, target_transform=None, loader=default_loader):
        imgs = []
        # 'with' closes the index file even on a parse error -- the original
        # opened it and never closed it (handle leak).
        with open(txt, 'r') as fh:
            for line in fh:
                words = line.strip().split()
                if not words:
                    continue  # tolerate blank lines instead of crashing
                imgs.append((words[0], int(words[1])))
        self.imgs = imgs                          # list of (path, label) pairs
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        """Return (image, label); the image goes through ``transform`` if set."""
        fn, label = self.imgs[index]
        img = self.loader(fn)
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        return len(self.imgs)

# Build Dataset objects from the index files; ToTensor converts PIL images to float tensors.
train_data=MyDataset(txt=root+'/train.txt', transform=transforms.ToTensor())
test_data=MyDataset(txt=root+'/test.txt', transform=transforms.ToTensor())

# train_data / test_data hold all samples; wrap them in a DataLoader for batched loading.
#train_loader = DataLoader(dataset=train_set, batch_size=64, shuffle=True)
#test_loader = DataLoader(dataset=test_set, batch_size=64)
#Helper function used to compute accuracy
def get_num_correct(preds, labels):
    """Count how many row-wise argmax predictions match the target labels."""
    hits = preds.argmax(dim=1).eq(labels)
    return hits.sum().item()
#Enable CUDA
import torch
# Prefer the first CUDA GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#Low-cost training run, used to smoke-test the network for errors
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

import torchvision
import torchvision.transforms as transforms

torch.set_printoptions(linewidth=120)
torch.set_grad_enabled(True)  # not strictly required -- gradients are enabled by default

# Create the network instance; batch_size=10 keeps memory use within a 2GB GPU.
network = MobileNetV2().to(device)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=10)
optimizer = optim.Adam(network.parameters(), lr=0.01)
for epoch in range(2):
    total_loss = 0
    total_correct = 0
    for batch in train_loader:   # Get batch
        images, labels =batch
        images = images.to(device)
        labels = labels.to(device)
        preds = network(images)
        loss = F.cross_entropy(preds, labels)
        optimizer.zero_grad()  # reset gradients before backward(); PyTorch accumulates them otherwise
        loss.backward()
        optimizer.step()
        
        total_loss += loss.item()
        total_correct += get_num_correct(preds, labels)
# NOTE(review): this print sits OUTSIDE the epoch loop, so only the last
# epoch's totals are reported; indent it one level to log every epoch.
print("epoch:",epoch,"loss:",total_loss,"total_correct:",total_correct)

结果:

epoch: 1 loss: 2328.4014094015583 total_correct: 51539
# Accuracy over the full training set, using the last epoch's correct count.
accuracy = total_correct/len(train_data)
print("accuracy:",accuracy)

结果:

accuracy: 0.8589833333333333
#Full training run with TensorBoard visualization
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

from torch.utils.tensorboard import SummaryWriter

torch.set_printoptions(linewidth=120)
torch.set_grad_enabled(True)
# Hyperparameter grids -- single values for now, but the nested loops make it
# easy to sweep several batch sizes / learning rates later.
batch_size_list = [10]
lr_list = [.01]
for batch_size in batch_size_list:
    for lr in lr_list:
        network = MobileNetV2().to(device)
        train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
        images, labels = next(iter(train_loader))
        images = images.to(device)
        labels = labels.to(device)
        grid = torchvision.utils.make_grid(images)   # build an image grid viewable in TensorBoard

        comment = f'batch_size={batch_size} lr ={lr}'
        tb = SummaryWriter(comment=comment)   # the comment tags this run so it can be uniquely identified in TensorBoard
        tb.add_image('images', grid)  # display the sample batch as one grid image
        tb.add_graph(network, images)   # log the network structure for graph visualization
        optimizer = optim.Adam(network.parameters(), lr=lr)

        for epoch in range(5):
    
            total_loss = 0
            total_correct = 0
    
            for batch in train_loader:    # Get Batch
                images, labels = batch
                images = images.to(device)
                labels = labels.to(device)
                preds = network(images) # Pass Batch
                loss = F.cross_entropy(preds, labels)  # Calculate loss
        
                optimizer.zero_grad()    # reset gradients, otherwise they accumulate across batches
                loss.backward()     # Calculate Gradients
                optimizer.step()    # Update Weights
        
                #total_loss += loss.item()
                total_loss += loss.item()*batch_size # scale by batch size so runs with different batch sizes stay comparable
                total_correct += get_num_correct(preds, labels)
        
            tb.add_scalar("Loss", total_loss, epoch)
            tb.add_scalar("Number Correct", total_correct, epoch)
            tb.add_scalar("Accuracy", total_correct/len(train_data), epoch)
            # The no-op string below (kept verbatim) notes that per-layer calls
            # like tb.add_histogram('conv1.bias', ...) only cover one layer;
            # the named_parameters() loop underneath logs every parameter instead.
            '''
            这种表达方式只能看单个层的偏置,权重,及其梯度的变化趋势,无法看到全部的
            tb.add_histogram('conv1.bias', network.conv1.bias, epoch)
            tb.add_histogram('conv1.weight', network.conv1.weight, epoch)
            tb.add_histogram('conv1.weight.grad', network.conv1.weight.grad, epoch)
            '''
            for name, weight in network.named_parameters():
                tb.add_histogram(name, weight, epoch)
                tb.add_histogram(f'{name}.grad', weight.grad, epoch)
            print("epoch:", epoch, "total_correct:", total_correct, "loss", total_loss)

tb.close()

参考:https://www.cnblogs.com/denny402/p/7520063.html
https://blog.csdn.net/sinat_42239797/article/details/90641659
https://blog.csdn.net/sinat_42239797/article/details/90641659
https://blog.csdn.net/sinat_42239797/article/details/90641659

  • 0
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值