AttributeError: ‘NoneType‘ object has no attribute ‘data‘

在深度学习训练网络过程中,我们常遇到如下的问题:属性错误(其中非类型的对象没有属性'data'),解决的方法主要是查看网络构造是否出现问题。

废话不多说,实践出真知。举个轻量级神经网络训练的例子,源代码包含三部分:网络构造、数据预处理加载以及网络训练。(使用的训练数据为ide可直接下载数据,需要的码友可以直接复现)

  1. 网络构造

import torch
import torch.nn as nn

use_cuda = torch.cuda.is_available()


class dw_conv(nn.Module):
    # 深度卷积
    def __init__(self, in_channels, out_channels, stride):
        super(dw_conv, self).__init__()
        self.dw_conv_3 = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, groups=in_channels, bias=False
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.dw_conv_3(x)
        out = self.bn(out)
        out = self.relu(out)


class point_conv(nn.Module):
    def __init__(self, in_channels, out_channels):
        # 点卷积
        super(point_conv, self).__init__()
        self.conv_1x1 = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=1
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        out = self.conv_1x1(x)
        out = self.bn(out)
        out = self.relu(out)
        return out


class My_Mobilenet(nn.Module):
    def __init__(self, num_classes):
        super(My_Mobilenet, self).__init__()
        self.num_classes = num_classes
        # if large_img:
        #     self.features = nn.Sequential(
        #         nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=2),
        #         # nn.BatchNorm2d(32),
        #         nn.ReLU(inplace=True),
        #         dw_conv(32, 32, 1),
        #         point_conv(32, 64),
        #         dw_conv(64, 64, 2),
        #         point_conv(64, 128),
        #         dw_conv(128, 128, 1),
        #         point_conv(128, 128),
        #         dw_conv(128, 128, 2),
        #         point_conv(128, 256),
        #         dw_conv(256, 256, 1),
        #         point_conv(256, 256),
        #         dw_conv(256, 256, 2),
        #         point_conv(256, 512),
        #
        #         dw_conv(512, 512, 1),
        #         point_conv(512, 512),
        #         dw_conv(512, 512, 1),
        #         point_conv(512, 512),
        #         dw_conv(512, 512, 1),
        #         point_conv(512, 512),
        #         dw_conv(512, 512, 1),
        #         point_conv(512, 512),
        #         dw_conv(512, 512, 1),
        #         point_conv(512, 512),
        #
        #         dw_conv(512, 512, 2),
        #         point_conv(512, 1024),
        #         dw_conv(1024, 1024, 2),
        #         point_conv(1024, 1024),
        #         nn.AvgPool2d(7),
        #         )
        # else:
        self.features = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            dw_conv(32, 32, 1),
            point_conv(32, 64),
            dw_conv(64, 64, 1),
            point_conv(64, 128),
            dw_conv(128, 128, 1),
            point_conv(128, 128),
            dw_conv(128, 128, 1),
            point_conv(128, 256),
            dw_conv(256, 256, 1),
            point_conv(256, 256),
            dw_conv(256, 256, 1),
            point_conv(256, 512),

            dw_conv(512, 512, 1),
            point_conv(512, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 512),

            dw_conv(512, 512, 1),
            point_conv(512, 1024),
            dw_conv(1024, 1024, 1),
            point_conv(1024, 1024),
            nn.AvgPool2d(4),
        )
        self.fc = nn.Linear(1024, self.num_classes)

    def forward(self, x):
        out = self.features(x)
        out = out.view(-1, 1024)
        out = self.fc(out)

def mobilenet(num_classes):
    """
Model has been designed to work on either ImageNet or CIFAR-10
    :param num_classes:1000 for ImageNet, 10 for CIFAR-10
    :param large_img:True for ImageNet, False for CIFAR-10
    :param kwargs:
    :return:model
    """
    model = My_Mobilenet(num_classes)
    if use_cuda:
        model = model.cuda()
    return model


# from torchsummary import summary
# model = mobilenet(10, False)
# print(summary(model, (-1, 224, 224, -1)))
  1. 数据预处理加载

import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.metrics import accuracy_score
from mobilenet_v1 import mobilenet

#  数据预处理 transforms
#  数据加载 datasets

#  transforms the dataset
train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),  # size大小可设置为 (32, 32)
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

valid_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

#  load the dataset
valid_size = 0.1
train_dataset = datasets.CIFAR10(root='cifar10', train=True, download=False, transform=train_transform)
# 当根目录没有'cifar10'数据集时,train=True可以直接从网上下载该数据集并保留在预先设置的根目录中
valid_dataset = datasets.CIFAR10(root='cifar10', train=True, download=False, transform=valid_transform)

# 进行随机采样,根据需求可有可无
num_train = len(train_dataset)
# print(num_train)
indices = list(range(num_train))
split = int(valid_size * num_train)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)

train_loader = DataLoader(dataset=train_dataset, batch_size=16, sampler=train_sampler)
valid_loader = DataLoader(dataset=valid_dataset, batch_size=16, sampler=valid_sampler)

test_dataset = datasets.CIFAR10(root='cifar10', train=False, download=False, transform=valid_transform)
test_loader = DataLoader(dataset=test_dataset, batch_size=16, shuffle=False)


# print('len(train_loader):{}\tlen(valid_loader):{}\tlen(test_loader):{}'.format(
#     len(train_loader), len(valid_loader), len(test_loader)))
#
# print(train_loader.dataset)
  1. 网络训练

"""
模型评估从以下三个部分来设计:损失函数设计、模型训练、模型验证测试
"""
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.metrics import accuracy_score
from mobilenet_v1 import mobilenet
from data_process_load import train_loader, valid_loader, test_loader
# import cifar10


use_cuda = torch.cuda.is_available()

model = mobilenet(num_classes=10)  # 先使用cifar10这个小数据集
optimizer = optim.Adam(model.parameters(), lr=0.01)
scheduler = StepLR(optimizer=optimizer, step_size=10, gamma=0.5)
criterion = nn.CrossEntropyLoss()


#  定义训练函数
def train(epoch):
    model.train()  # 表明所有的参数都在训练,需要更新,而不再固定
    for batch_idx, (datasets, target) in enumerate(train_loader):
        if use_cuda:
            datasets, target = datasets.cuda(), target.cuda()
        # data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(datasets)
        correct = 0
        # print(output)
        pred = output.data.max(1, keepdim=True)[1]
        # print(target.data.view_as(pred))
        # print(target, target.data)
        correct += pred.eq(target.data.view_as(pred)).sum()  # .eq()统计相同的个数
        loss = criterion(output, target)
        loss.backward()
        accuracy = 100.0 * correct / len(output)
        optimizer.step()
        if batch_idx % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss:{:.6f}, Accuracy:{:.2f}'.format(
                epoch, batch_idx * len(datasets), len(train_loader.dataset), 100.0 * batch_idx / len(train_loader), loss.item(), accuracy
            ))
    scheduler.step()


#   定义验证函数
def validate(epoch):
    model.eval()
    valid_loss = 0
    correct = 0
    for data, target in valid_loader:
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # data, target = Variable(data), Variable(target)
        output = model(data)
        valid_loss += F.cross_entropy(output, target, size_average=False).item()
        pred = output.data.max(1, keepdim=True)[1]
        correct +=pred.eq(target.data.view_as(pred)).sum()  # 正确类别分类个数

    valid_loss /= len(valid_loader)
    accuracy = 100. * correct / len(valid_loader)
    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{}(:.2f)%\n'.format(
        valid_loss, correct, len(valid_loader), 100.0 * correct / len(valid_loader)
    ))
    return valid_loss, accuracy


def test(epoch):
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # data, target = Variable(data, validate(True)), Variable(target)
        output = model(data)
        test_loss += F.cross_entropy(output, target, size_average=False).item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss += len(test_loader)
    print('\nTest set: Average loss:{.4f}, Accuracy:{}/{}(:.2f)%\n'.format(
        test_loss, correct, len(test_loader), 100.0 * correct / len(test_loader)
    ))


for epoch in range(50):
    train(epoch)
    loss, accuracy = validate(epoch)

将会出现一下报错情况。

通过debug发现是网络中定义的类函数forward没有返回值再下方添加return out即可解决问题。

  • 2
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值