A ResNet-based mask detection image classifier (custom dataset)

1. First, prepare the dataset in the directory layout shown below; the test set follows the same format.

Data
    ----train
        ----mask
        ----nomask
    ----val
        ----mask
        ----nomask
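With this layout, torchvision's ImageFolder can infer the class labels directly from the sub-folder names (sorted alphabetically, so 'mask' maps to index 0 and 'nomask' to index 1, which is what the test code at the end relies on). A minimal sketch:

from torchvision import datasets

# Each sub-folder of train/ becomes one class; folder names are sorted, so 'mask' -> 0, 'nomask' -> 1
ds = datasets.ImageFolder('/content/Data/train/')
print(ds.class_to_idx)   # {'mask': 0, 'nomask': 1}
print(len(ds))           # total number of training images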

2. Dataset loading and preprocessing

import torchvision.transforms as transforms

# Resize to 224x224, convert to a tensor, and normalize with ImageNet statistics
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
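Applied to a single PIL image, this transform produces a normalized 3x224x224 float tensor. A quick check (the file name here is only an illustration, not a file from the original post):

from PIL import Image

# Hypothetical sample image, just to show the output shape
img = Image.open('/content/Data/train/mask/example.jpg').convert('RGB')
x = transform(img)
print(x.shape)   # torch.Size([3, 224, 224])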

3. Build the training and validation datasets

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import datasets

# Assumed hyperparameters and device (values not specified in the original post)
BATCH_SIZE = 32
EPOCHS = 10
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Training and validation sets (paths follow the directory layout above)
dataset_train = datasets.ImageFolder('/content/Data/train/', transform)
print(dataset_train.imgs)
print(dataset_train.class_to_idx)

# The validation set uses the same preprocessing as the training set
dataset_test = datasets.ImageFolder('/content/Data/val/', transform)
print(dataset_test.class_to_idx)

# Wrap the datasets in DataLoaders
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=BATCH_SIZE, shuffle=False)

# Learning rate
modellr = 1e-4

# Loss function
criterion = nn.CrossEntropyLoss()

# Model: a pretrained ResNet-50 with its final FC layer replaced for 2 classes
model = torchvision.models.resnet50(pretrained=True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, 2)
model.to(DEVICE)

# Optimizer: Adam is a common choice
optimizer = optim.Adam(model.parameters(), lr=modellr)
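Note that the pretrained= argument is deprecated in newer torchvision releases; an equivalent call with the weights API (assuming torchvision 0.13 or later) would be:

from torchvision.models import resnet50, ResNet50_Weights

# Same ImageNet-pretrained backbone, expressed with the newer weights API
model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V1)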

4. Training and validation

def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 50 epochs."""
    modellrnew = modellr * (0.1 ** (epoch // 50))
    print("lr:", modellrnew)
    for param_group in optimizer.param_groups:
        param_group['lr'] = modellrnew
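This manual schedule is roughly equivalent to PyTorch's built-in StepLR scheduler; a minimal sketch (you would call scheduler.step() once per epoch instead of adjust_learning_rate):

from torch.optim.lr_scheduler import StepLR

# Multiply the learning rate by 0.1 every 50 epochs
scheduler = StepLR(optimizer, step_size=50, gamma=0.1)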


# Training
def train(model, device, train_loader, optimizer, epoch):
    model.train()
    sum_loss = 0
    total_num = len(train_loader.dataset)
    print(total_num, len(train_loader))
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        output = model(data)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print_loss = loss.item()
        sum_loss += print_loss
        if (batch_idx + 1) % 50 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
                       100. * (batch_idx + 1) / len(train_loader), loss.item()))
    ave_loss = sum_loss / len(train_loader)
    print('epoch:{}, loss:{}'.format(epoch, ave_loss))

# Validation
def val(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    total_num = len(test_loader.dataset)
    print(total_num, len(test_loader))
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = criterion(output, target)
            _, pred = torch.max(output, 1)
            correct += torch.sum(pred == target)
            test_loss += loss.item()
        correct = correct.item()
        acc = correct / total_num
        avgloss = test_loss / len(test_loader)
        print('\nVal set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            avgloss, correct, len(test_loader.dataset), 100 * acc))

# Training loop: train, validate, and save a checkpoint after each epoch
for epoch in range(1, EPOCHS + 1):
    adjust_learning_rate(optimizer, epoch)
    train(model, DEVICE, train_loader, optimizer, epoch)
    val(model, DEVICE, test_loader)
    torch.save(model, 'model' + str(epoch) + '.pth')
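torch.save(model, ...) pickles the whole model object, which ties the checkpoint to this exact code. A common alternative (a sketch only, not what the rest of this post uses) is to save just the state_dict and rebuild the network before loading:

# Save only the weights ...
torch.save(model.state_dict(), 'model' + str(epoch) + '.pth')

# ... and later rebuild the same architecture before loading them back
model = torchvision.models.resnet50()
model.fc = nn.Linear(model.fc.in_features, 2)
model.load_state_dict(torch.load('model4.pth', map_location=DEVICE))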

Test results:

# Test-set preprocessing and model loading
import os
import math

import torch
import torchvision.transforms as transforms
from PIL import Image

classes = ('mask', 'nomask')

# Must match the normalization used during training (ImageNet statistics)
transform_test = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = torch.load("model4.pth", map_location=DEVICE)
model.eval()
model.to(DEVICE)
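If this is run on a recent PyTorch release (2.6 or later), torch.load defaults to weights_only=True and will refuse to un-pickle a whole model object; in that case the call needs the extra flag, roughly:

# Only needed on PyTorch 2.6+, where weights_only defaults to True
model = torch.load("model4.pth", map_location=DEVICE, weights_only=False)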


# Remove the .ipynb_checkpoints folder that Colab/Jupyter may create inside the test directory
delDir3 = "/content/Data/test/mask/.ipynb_checkpoints"
if os.path.exists(delDir3):
    os.rmdir(delDir3)

path = '/content/Data/test/mask/'
testList = os.listdir(path)

# Test the 'mask' images: class index 0 corresponds to 'mask' (see class_to_idx)
count = 0
total = 0
for file in testList:
    img = Image.open(path + file).convert('RGB')
    img = transform_test(img)
    img = img.unsqueeze(0).to(DEVICE)
    with torch.no_grad():
        out = model(img)
    # Predicted class index
    _, pred = torch.max(out, 1)
    total += 1
    if pred.item() == 0:
        count += 1
    print('Image Name:{}, predict:{}, predict_label:{}'.format(file, pred.item(), classes[pred.item()]))

print('mask data test accuracy:{}'.format(count / total))
# Repeat for the 'nomask' images: class index 1 corresponds to 'nomask'
count1 = 0
total1 = 0

delDir4 = "/content/Data/test/nomask/.ipynb_checkpoints"
if os.path.exists(delDir4):
    os.rmdir(delDir4)

path = '/content/Data/test/nomask/'
testList = os.listdir(path)

for file in testList:
    img = Image.open(path + file).convert('RGB')
    img = transform_test(img)
    img = img.unsqueeze(0).to(DEVICE)
    with torch.no_grad():
        out = model(img)
    # Predicted class index
    _, pred = torch.max(out, 1)
    total1 += 1
    if pred.item() == 1:
        count1 += 1
    print('Image Name:{}, predict:{}, predict_label:{}'.format(file, pred.item(), classes[pred.item()]))

print('nomask data test accuracy:{}'.format(count1 / total1))
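The two loops above differ only in the folder path and the expected class index, so they could share a helper; a sketch (the name eval_folder is an illustration, not from the original post):

def eval_folder(folder, expected_idx):
    """Return (correct, total) over all images in one class folder."""
    correct, total = 0, 0
    for file in os.listdir(folder):
        img = transform_test(Image.open(os.path.join(folder, file)).convert('RGB'))
        with torch.no_grad():
            out = model(img.unsqueeze(0).to(DEVICE))
        pred = out.argmax(1).item()
        total += 1
        correct += int(pred == expected_idx)
    return correct, total

# e.g. count, total = eval_folder('/content/Data/test/mask/', 0)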

def g_mean(tp, fn, fp, tn):
    """Geometric mean of the true-positive rate and the true-negative rate."""
    tpr = tp / (tp + fn)
    tnr = tn / (tn + fp)
    return math.sqrt(tpr * tnr)

# Treat 'mask' as the positive class: TP/FN come from the mask folder, TN/FP from the nomask folder
value = g_mean(count, total - count, total1 - count1, count1)
print('G-mean is: ' + str(value))
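Here the G-mean is simply the geometric mean of the two per-class accuracies: TPR = TP/(TP+FN) is the accuracy on the mask folder, TNR = TN/(TN+FP) is the accuracy on the nomask folder, and G-mean = sqrt(TPR × TNR). It stays low whenever either class is classified poorly, which makes it more informative than overall accuracy when the two classes are imbalanced.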

For a version with visualized results, see this article (not repeated here):

Pytorch 实现Resnet分类Cifar10数据集>95%_resnet cifar-10 pytorch-CSDN博客
