365天深度学习训练营第p6周好莱坞明星识别心得体会

该代码示例展示了如何利用PyTorch进行图像数据预处理,包括使用ImageFolder加载数据,应用变换如Resize和Normalize,然后使用vgg16预训练模型进行多分类任务。模型经过训练和测试,调整了最后一层以适应16个类别,并用CrossEntropyLoss作为损失函数,Adam作为优化器,以及自定义的学习率调度策略。
摘要由CSDN通过智能技术生成
import torch
import torch.nn as nn
import torchvision
import os,pathlib,warnings,random
from PIL import Image
warnings.filterwarnings('ignore')
from torchvision import transforms as transforms,datasets
import matplotlib.pyplot as plt
import copy

# Pick the GPU when available, otherwise fall back to the CPU.
device=('cuda'if torch.cuda.is_available()else'cpu')
print(device)
# Dataset root (one sub-folder per celebrity = one class) and a single
# sample image used later for a one-off prediction demo.
data_dir='D://BaiduNetdiskDownload//深度学习p6'
data_dir1='D://BaiduNetdiskDownload//深度学习p6//Angelina Jolie//001_fe3347c0.jpg'
print('方便在下面,将data_dir传入ImageFolder中,根据地址,对数据进行转换')

# List the immediate children of the root: each entry is a class folder.
data_dir=pathlib.Path(data_dir)
data_paths=list(data_dir.glob('*'))
print(data_paths[0])
print('打印出首条地址,看一下地址的构成情况有没有问题')

# Class names are the last path component (Windows separator is '\\').
classeNames=[str(path).split('\\')[-1] for path in data_paths]
print('地址中,可能用\表示地址的路线,但是当我们要将其进行分裂的时候,挑出最后的文件名的时候,要用两个\表示\,这个是windows的要求')
print(classeNames)
print('其实封装好数据的文件名就是一个类别的类别名')
# Preprocessing pipeline: resize to VGG16's expected 224x224 input,
# random horizontal flip for augmentation, then normalize with the
# standard ImageNet channel statistics (required by the pretrained weights).
# NOTE(review): this single transform is also applied to the test split
# below, so the flip augmentation leaks into evaluation — confirm intended.
train_transforms=transforms.Compose([
    transforms.Resize([224,224]),
transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485,0.456,0.406],
        std=[0.229,0.224,0.225]
    )
])
# ImageFolder infers integer labels from the sub-folder names.
total_data=datasets.ImageFolder(data_dir,transform=train_transforms)
print(total_data)
print('犯了一个错误,将参数transform写成transforms,实际上ImageFolder中参数就是transform,我受到了上面train_transforms的影响')
# Mapping from class (folder) name to integer label index.
leibie=total_data.class_to_idx
print(leibie)
print('将文件名及其索引打印出来,相当于设定好了要分类的类别,这次一共16个类别,属于多分类任务')


# 80/20 train/test split; int() truncation keeps the sizes integral and
# the remainder goes to the test set so the two always sum to len(total_data).
train_size=int(0.8*len(total_data))
print('犯了一个错误,将0.8放在外面*int,结果train_size就是一个浮点数,从而影响了下面的数据切割,一定要将0.8放在里面,这样train_size就是一个整数')

test_size=len(total_data)-train_size
print(train_size,test_size)

# Random (unseeded) split — a different partition on every run.
train_dataset,test_dataset=torch.utils.data.random_split(total_data,[train_size,test_size])
print(train_dataset,test_dataset)
print('已经生成训练集和测试集成功')

# Batched loaders: shuffle the training stream each epoch, keep the test
# stream in a fixed order.
batch_size=256
train_dl=torch.utils.data.DataLoader(
    train_dataset,
    batch_size=batch_size,
    shuffle=True
)
test_dl=torch.utils.data.DataLoader(
    test_dataset,
    batch_size=batch_size,
    shuffle=False
)
print('DataLoader中,参数num_workers在windows似乎不能发挥作用,所以我省略了')
print(train_dl,test_dl)
print('犯了一个错误,就是想打印train_dl[0],这个DataLoader是不行的')

# Peek at one batch to sanity-check tensor shapes: X is [N, C, H, W].
for X,y in test_dl:
    print('shape of x[N,C,H,W]:',X.shape)
    print('shape of y:',y.shape,y.dtype)
    break
from torchvision.models import vgg16
# Load ImageNet-pretrained VGG16.
# NOTE(review): `pretrained=True` is deprecated in newer torchvision in
# favor of `weights=VGG16_Weights.DEFAULT` — confirm the installed version.
model=vgg16(pretrained=True).to(device)
print('加载vgg16的预训练模型,将其放到gpu中')
print(model)
print('看一下模型什么样子,由features,avgpool,classifier三个阶段,这里看一下classifier的最后一层也就是第六层,输出为1000')

# Freeze every pretrained parameter; only layers replaced below will train.
for param in model.parameters():
    param.requires_grad=False
print('不优化梯度,也就是模型参数不变')
# Replace the two classifier dropouts (indices 2 and 5) and swap the final
# 1000-way ImageNet head for one sized to this task. The freshly created
# modules have requires_grad=True, so they are the only trainable part.
model.classifier._modules['2']=nn.Dropout(p=0.3,inplace=False)
model.classifier._modules['5']=nn.Dropout(p=0.3,inplace=False)
model.classifier._modules['6']=nn.Linear(4096,len(classeNames))
model.to(device)
print(model)
print('这里将上面提到的输出1000,改为len(classeNames),也就是实际的分类数目')

def train(dataloader,model,loss_fn,optimizer,device=None):
    """Run one training epoch and return (accuracy, mean batch loss).

    dataloader: yields (inputs, integer class labels) batches.
    model:      the network being optimized (caller sets model.train()).
    loss_fn:    criterion, e.g. nn.CrossEntropyLoss.
    optimizer:  optimizer stepping the trainable parameters of `model`.
    device:     target device for each batch. Defaults to the device of
                the model's own parameters, which matches the original
                behavior (the script moves the model to the global
                `device` before calling this).

    Returns (train_acc, train_loss): accuracy is correct/total samples,
    loss is the sum of per-batch losses divided by the batch count.
    """
    if device is None:
        # Backward-compatible default: follow wherever the model lives.
        device=next(model.parameters()).device
    size=len(dataloader.dataset)   # total sample count, for accuracy
    num_batches=len(dataloader)    # batch count, for the mean loss
    train_loss,train_acc=0,0
    for X,y in dataloader:
        X,y=X.to(device),y.to(device)
        pred=model(X)
        loss=loss_fn(pred,y)

        # Standard backprop step: clear stale grads, compute, update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # argmax over the class dimension gives the predicted label.
        train_acc+=(pred.argmax(1)==y).type(torch.float).sum().item()
        train_loss+=loss.item()
    train_acc/=size
    train_loss/=num_batches
    return train_acc,train_loss

def test(dataloader,model,loss_fn):
    size=len(dataloader.dataset)
    num_batches=len(dataloader)
    test_acc,test_loss=0,0
    with torch.no_grad():
        for X,y in dataloader:
            X,y=X.to(device),y.to(device)
            pred=model(X)
            loss=loss_fn(pred,y)

            test_acc+=(pred.argmax(1)==y).type(torch.float).sum().item()
            test_loss=loss.item()
    test_acc/=size
    test_loss/=num_batches
    return test_acc,test_loss

# Adam with a small base LR (the backbone is frozen; only the new head
# trains). The LambdaLR multiplier decays the LR by a factor of 0.92
# every 4 epochs (epoch//4 is the number of completed 4-epoch steps).
learn_rate=1e-5
lambda1=lambda epoch:0.92**(epoch//4)
optimizer=torch.optim.Adam(model.parameters(),lr=learn_rate)
scheduler=torch.optim.lr_scheduler.LambdaLR(optimizer,lr_lambda=lambda1)
# CrossEntropyLoss expects raw logits + integer class labels.
loss_fn=nn.CrossEntropyLoss()

epochs=40
epochs_range=range(epochs)
# Per-epoch history, used for the plots after training.
train_acc=[]
train_loss=[]
test_acc=[]
test_loss=[]

# Best test accuracy seen so far; drives the best-model snapshot below.
best_acc=0

# Main loop: train an epoch, advance the LR schedule, evaluate, and keep
# a deep copy of the model whenever test accuracy improves.
for epoch in epochs_range:
    model.train()
    epoch_train_acc,epoch_train_loss=train(train_dl,model,loss_fn,optimizer)
    scheduler.step()  # once per epoch, matching the lambda's epoch unit

    model.eval()
    epoch_test_acc,epoch_test_loss=test(test_dl,model,loss_fn)

    # Snapshot the full model (not just state_dict) at its best test score.
    if epoch_test_acc>best_acc:
        best_acc=epoch_test_acc
        best_model=copy.deepcopy(model)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)
    # Read the current LR back out of the optimizer for the log line.
    lr=optimizer.state_dict()['param_groups'][0]['lr']
    template=('Epoch:{:2d},train_acc:{:.1f}%,test_acc:{:.1f}%,train_loss:{:.3f},test_loss:{:.3f},Lr:{:.2E}')
    print(template.format(epoch+1,epoch_train_acc*100,epoch_test_acc*100,epoch_train_loss,epoch_test_loss,lr))
#PATH='保存的地址'
#torch.save(model.state_dict(),PATH)
print('好了')
# Plot config: SimHei font so Chinese labels render, keep the minus sign
# displayable, and bump the figure DPI.
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
plt.rcParams['figure.dpi']=100
plt.figure(figsize=[20,10])

# Left panel: accuracy curves over the epoch range.
plt.subplot(1,2,1)
plt.plot(epochs_range,train_acc,label='Training Accuracy')
plt.plot(epochs_range,test_acc,label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Test Accuracy')

# Right panel: loss curves.
plt.subplot(1,2,2)
plt.plot(epochs_range,train_loss,label='Training Loss')
plt.plot(epochs_range,test_loss,label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Test Loss')

plt.show()

# Class names ordered by label index (iterating the class_to_idx dict
# yields its keys; relies on insertion order matching the indices).
classes=list(leibie)

def predict_one_image(image_path,model,transform,classes):
    """Load one image from disk, run it through `model`, and print the
    predicted class name.

    image_path: path to a single image file.
    model:      trained classifier expecting a (1, 3, 224, 224) input here.
    transform:  torchvision transform mapping a PIL image to a tensor.
    classes:    class names indexed by the model's output positions.
    """
    test_img=Image.open(image_path).convert('RGB')# open the file and drop any alpha channel (RGBA -> RGB)
    print(test_img)
    plt.imshow(test_img)# render the raw image via matplotlib
    test_img=transform(test_img)
    print(test_img.shape)
    print('此时test_img为tensor格式')
    # Add a leading batch dimension and move to the compute device.
    img=test_img.to(device).unsqueeze(0)
    print('将torch.Size([3, 224, 224])改为torch.Size([1, 3, 224, 224])')
    print(img.shape)
    print('开始进入模型测试环节')
    # NOTE(review): runs without torch.no_grad(), so autograd state is
    # built for this forward pass — harmless here but wasteful.
    model.eval()
    output=model(img)
    print(output)
    # torch.max over dim=1 returns (max value, argmax index) per row.
    _,pred=torch.max(output,1)
    print(_)# e.g. tensor([0.3122], device='cuda:0', grad_fn=<MaxBackward0>)
    print(pred)# index of the largest logit in the row
    print('1代表行,返回这一行中最大数的位置')
    pred_class=classes[pred]
    print(f'预测结果是:{pred_class}')

# One-off prediction demo on the sample image.
# NOTE(review): this uses the LAST-epoch `model` (not `best_model`) and
# the training transform, so the random flip may alter the input.
predict_one_image(
    data_dir1,
    model=model,
    transform=train_transforms,
    classes=classes
)
# Re-evaluate the best snapshot on the test split and report its metrics.
best_model.eval()
epoch_test_acc,epoch_test_loss=test(test_dl,best_model,loss_fn)
print(epoch_test_acc,epoch_test_loss)

 模型泛化能力不错(训练集与测试集表现接近),但偏差仍然偏大,即模型对训练集本身的拟合精度还不够理想。

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值