pytorch保存模型

仅作为记录,大佬请跳过。

保存模型

# Save the entire model object (architecture + weights, serialized via pickle).
torch.save(model,'/Data3T/tempt/vit-pytorch/model_toal.pkl')
# Save only the learned parameters (state_dict) — the recommended approach,
# since it does not depend on pickling the model class itself.
torch.save(model.state_dict(),'/Data3T/tempt/vit-pytorch/net_params.pkl')

参考:原文中的参考链接(转载后链接已失效,无法跳转)

博主程序:
(原文此处为程序截图,转载时图片未能保留)

引用模型

# Load a model that was saved whole with torch.save(model, ...).
# This returns the full model object; the class definition must be
# importable in the current environment for unpickling to succeed.
model=torch.load('../model_toal.pkl')
print(model)

博主程序:

在这里插入图片描述

程序记录

博友请跳过,仅作为记录。

import torch
# Load the full pickled model object (architecture + weights) saved earlier.
model=torch.load('../model_toal.pkl')
print(model)
import glob
import os
from sklearn.model_selection import train_test_split
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Dataset
seed = 42        # fixed seed so the train/validation split is reproducible
batch_size = 64

# Use the first GPU when available, otherwise fall back to CPU.
device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

train_dir = '/Data3T/tempt/vit-pytorch/examples/data/train/train_images'
test_dir = '/Data3T/tempt/vit-pytorch/examples/data/test/test_images'#'data/test'

# Collect all PNG image paths under each directory.
train_list = glob.glob(os.path.join(train_dir,'*.png'))#glob.glob(os.path.join(train_dir,'*.jpg'))
test_list = glob.glob(os.path.join(test_dir, '*.png'))#glob.glob(os.path.join(test_dir, '*.jpg'))

print(f"Train Data: {len(train_list)}")
print(f"Test Data: {len(test_list)}")

# Class label = file-name prefix before the first underscore,
# e.g. "cat_001.png" -> "cat". Used only to stratify the split below.
labels = [path.split('/')[-1].split('_')[0] for path in train_list]

# Hold out 20% of the training images as a validation set, keeping
# class proportions identical in both subsets (stratify).
train_list, valid_list = train_test_split(train_list,
                                          test_size=0.2,
                                          stratify=labels,
                                          random_state=seed)

print(f"Train Data: {len(train_list)}")
print(f"Validation Data: {len(valid_list)}")
print(f"Test Data: {len(test_list)}")

# Training-time augmentation: random scale/aspect crop plus horizontal flip.
# NOTE: the original also applied Resize((224, 224)) *before*
# RandomResizedCrop(224); that pre-resize is redundant (RandomResizedCrop
# already resizes its crop to 224x224) and reduces crop diversity, so it
# is dropped here.
train_transforms = transforms.Compose(
    [
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ]
)

# Validation preprocessing must be DETERMINISTIC. The original pipeline
# applied RandomResizedCrop/RandomHorizontalFlip here as well, which
# randomizes evaluation inputs and makes validation metrics noisy and
# irreproducible. A fixed resize is the correct eval-time transform.
val_transforms = transforms.Compose(
    [
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ]
)


# Same deterministic preprocessing for the test set (same fix as above).
test_transforms = transforms.Compose(
    [
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ]
)



class CatsDogsDataset(Dataset):
    """Cats-vs-dogs image dataset.

    Each item is ``(image, label)`` where ``label`` is 1 when the file
    name starts with ``"dog"`` (before the first ``.``) and 0 otherwise.

    Args:
        file_list: list of image file paths.
        transform: optional callable applied to the loaded PIL image.
    """

    def __init__(self, file_list, transform=None):
        self.file_list = file_list
        self.transform = transform

    def __len__(self):
        # Number of images in the dataset.
        return len(self.file_list)

    def __getitem__(self, idx):
        img_path = self.file_list[idx]
        img = Image.open(img_path)
        # Fix: the original called self.transform(img) unconditionally,
        # which crashed with TypeError whenever the default transform=None
        # was used. Only apply the transform when one was supplied.
        if self.transform is not None:
            img = self.transform(img)

        # NOTE(review): this assumes names like "dog.123.png". The
        # stratification labels built earlier in the script split on "_"
        # instead of "." — confirm which naming scheme the data uses.
        label = img_path.split("/")[-1].split(".")[0]
        label = 1 if label == "dog" else 0

        return img, label

train_data = CatsDogsDataset(train_list, transform=train_transforms)
# Consistency fix: the original passed test_transforms here; use the
# validation pipeline for validation data (the two are defined identically,
# so behavior is unchanged, but intent is now explicit).
valid_data = CatsDogsDataset(valid_list, transform=val_transforms)
test_data = CatsDogsDataset(test_list, transform=test_transforms)

# Shuffling only matters for training. The original shuffled the
# validation and test loaders too, which makes evaluation order (and any
# per-batch logging) non-reproducible for no benefit.
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=batch_size, shuffle=False)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)

print(len(train_data), len(train_loader))

print(len(valid_data), len(valid_loader))
# Image is only used inside CatsDogsDataset.__getitem__, which runs lazily,
# so this late import works — but it belongs at the top of the file.
from PIL import Image
import torch.nn as nn
criterion = nn.CrossEntropyLoss()

# Evaluate the loaded model on the validation set.
# Fixes vs the original:
#  - the model was never moved to `device`, so on a GPU machine the
#    data.to(device) calls below caused a device-mismatch error;
#  - model.eval() was never called, leaving dropout/batch-norm layers in
#    training mode during evaluation and skewing the metrics.
model = model.to(device)
model.eval()

with torch.no_grad():
    epoch_val_accuracy = 0
    epoch_val_loss = 0
    for data, label in valid_loader:
        data = data.to(device)
        label = label.to(device)

        val_output = model(data)
        val_loss = criterion(val_output, label)

        # Per-batch accuracy; dividing each batch's contribution by the
        # number of batches yields the epoch mean (approximate if the
        # last batch is smaller).
        acc = (val_output.argmax(dim=1) == label).float().mean()
        epoch_val_accuracy += acc / len(valid_loader)
        epoch_val_loss += val_loss / len(valid_loader)

# NOTE: 'acc' here is only the LAST batch's accuracy, not the epoch mean.
print('acc:',acc)
print('epoch_val_accuracy:',epoch_val_accuracy)
print('epoch_val_loss:',epoch_val_loss)

**************

迁移学习resnet保存模型

'''
Saving the transfer-learning ResNet model.

Fix: the original used plain literals like '\model10.pkl'. "\m" and "\p"
are invalid escape sequences (SyntaxWarning since Python 3.12, slated to
become errors); raw strings keep the exact same path bytes while removing
the warning. A drive-root path like "\model10.pkl" is also a dubious save
location on Windows — consider a relative path such as 'model10.pkl'.
'''
# Save the entire model object (pickles the class as well)
torch.save(model_conv, r'\model10.pkl')
# Load
# model = torch.load(r'\model.pkl')
# Save only the learned parameters (state_dict) — recommended
torch.save(model_conv.state_dict(), r'\parameter10.pkl')
# Load: instantiate the architecture first, then restore the weights
# model = TheModelClass(...)
# model.load_state_dict(torch.load(r'\parameter.pkl'))

展示:

在这里插入图片描述
位于/Data4T/unet/data

*****************

在新的ViT中设置保存模型

在这里插入图片描述

位于/Data4T/unet/ViT/vit-pytorch/examples

保存模型权重 和 加载模型权重

# Save only the model weights (state_dict) to a checkpoint file.
torch.save(model.state_dict(), '/output/checkpoint.pth')
# Restore the weights into an already-constructed model instance;
# the architecture must be created first (see the note below).
model.load_state_dict(torch.load('/output/checkpoint.pth'))

在这里插入图片描述

加载模型权重时,需要先将模型创建出,如博主的:

在这里插入图片描述

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值