# MobileNet implementation trained on the CIFAR-10 dataset (PyTorch)

import torch

import torchvision

from torchvision import datasets, transforms

from torch.utils.data import DataLoader

from torch import nn

import torch.nn.functional as F

import numpy as np

from torch.utils.data import Dataset, DataLoader, TensorDataset


 

# CIFAR-10 training split; images are converted to float tensors in [0, 1].
# The archive is downloaded to ./cifar10_data/ on first run.
train_dataset = datasets.CIFAR10(
    './cifar10_data/',
    train=True,
    download=True,
    transform=transforms.Compose([transforms.ToTensor()]),
)

# 50,000 images / 500 per batch = 100 batches per epoch, reshuffled each epoch.
train_loader = DataLoader(dataset=train_dataset, batch_size=500, shuffle=True)

 

class Block(nn.Module):
    """Depthwise-separable convolution block (MobileNet v1).

    A 3x3 depthwise convolution (one filter per input channel, via
    ``groups=in_planes``) followed by a 1x1 pointwise convolution that
    mixes channels; each convolution is followed by BatchNorm and ReLU.
    """

    def __init__(self, in_planes, out_planes, stride=1):
        super(Block, self).__init__()
        # Depthwise stage: spatial filtering only, channel count unchanged.
        self.conv1 = nn.Conv2d(
            in_planes, in_planes,
            kernel_size=3, stride=stride, padding=1,
            groups=in_planes, bias=False,
        )
        self.bn1 = nn.BatchNorm2d(in_planes)
        # Pointwise stage: channel mixing, maps in_planes -> out_planes.
        self.conv2 = nn.Conv2d(
            in_planes, out_planes,
            kernel_size=1, stride=1, padding=0, bias=False,
        )
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        hidden = F.relu(self.bn1(self.conv1(x)))
        return F.relu(self.bn2(self.conv2(hidden)))


 

class MobileNet(nn.Module):
    """MobileNet v1 adapted to 32x32 inputs (CIFAR-10).

    ``cfg`` describes the stack of depthwise-separable blocks: a plain int
    means ``planes`` with stride 1; a ``(planes, stride)`` tuple sets both.
    """

    cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2),
           512, 512, 512, 512, 512, (1024, 2), 1024]

    def __init__(self, num_classes=10):
        super(MobileNet, self).__init__()
        # Stem: full 3x3 convolution lifting RGB to 32 channels.
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.linear = nn.Linear(1024, num_classes)

    def _make_layers(self, in_planes):
        """Build the sequential stack of Blocks described by ``cfg``."""
        blocks = []
        for entry in self.cfg:
            if isinstance(entry, int):
                out_planes, stride = entry, 1
            else:
                out_planes, stride = entry
            blocks.append(Block(in_planes, out_planes, stride))
            in_planes = out_planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layers(h)
        # After five stride-2 blocks a 32x32 input is 2x2; pool to 1x1.
        h = F.avg_pool2d(h, 2)
        h = h.view(h.size(0), -1)
        return self.linear(h)

 

# Number of full passes over the training set.
epoch = 5

net = MobileNet()
cost = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.0005)

# BatchNorm layers must be in training mode so batch statistics are used
# and running stats are updated.
net.train()

for k in range(epoch):
    sum_loss = 0.0
    train_correct = 0
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data

        optimizer.zero_grad()
        outputs = net(inputs)
        loss = cost(outputs, labels)
        loss.backward()
        optimizer.step()

        # .item() converts to a plain Python float; the old `.data`
        # attribute is deprecated and risks accidental graph retention.
        sum_loss += loss.item()
        # `predicted` instead of `id` — don't shadow the builtin.
        _, predicted = torch.max(outputs, 1)
        train_correct += (predicted == labels).sum().item()

    # Report once per epoch instead of printing the raw loss every batch.
    print('[epoch %d] loss:%.03f' % (k + 1, sum_loss / len(train_loader)))
    print('        correct:%.03f%%' % (100.0 * train_correct / len(train_dataset)))


 

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
以下是使用PyTorch实现的RNN对CIFAR-10图像分类任务的代码示例:

```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms

# 定义超参数
input_size = 3 * 32 * 32
hidden_size = 100
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001

# 加载CIFAR-10数据集
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                             download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
                                           shuffle=True, num_workers=2)

test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                            download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,
                                          shuffle=False, num_workers=2)

# 定义RNN模型
class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        h0 = Variable(torch.zeros(1, x.size(0), self.hidden_size))
        out, _ = self.rnn(x, h0)
        out = self.fc(out[:, -1, :])
        return out

rnn = RNN(input_size, hidden_size, num_classes)

# 定义损失函数和优化器
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(rnn.parameters(), lr=learning_rate)

# 训练模型
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = Variable(images.view(-1, 32, 32 * 3))
        labels = Variable(labels)

        # 前向传播
        outputs = rnn(images)
        loss = criterion(outputs, labels)

        # 反向传播和优化
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f'
                  % (epoch + 1, num_epochs, i + 1,
                     len(train_dataset) // batch_size, loss.data[0]))

# 测试模型
correct = 0
total = 0
for images, labels in test_loader:
    images = Variable(images.view(-1, 32, 32 * 3))
    outputs = rnn(images)
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum()

print('Accuracy of the RNN on the 10000 test images: %d %%' % (100 * correct / total))
```

这段代码使用了PyTorch的nn.RNN模块来实现RNN模型,并使用交叉熵损失函数和Adam优化器来训练模型。在训练过程中,代码使用了PyTorch的DataLoader来加载CIFAR-10数据集,并使用Variable来进行数据的自动求导。最后,代码计算了模型在测试集上的准确率。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值