Hand-Writing a Training Pipeline: Follow-Up Check

Below is the full training script: a small convolutional network (simpleconv3) trained on an ImageFolder-style dataset, with per-epoch loss and accuracy logged to TensorBoard via tensorboardX.

import os

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

from net import simpleconv3  ## the network definition; a sketch of net.py is given after the script
from tensorboardX import SummaryWriter

writer = SummaryWriter('logs')  ## TensorBoard event files go under ./logs

def train(model, criterion, optimizer, lr_scheduler, epochs):
    use_gpu = torch.cuda.is_available()
    for epoch in range(epochs):
        print('Epoch {}/{}'.format(epoch, epochs))
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0  ## accumulated loss
            running_accs = 0.0  ## accumulated number of correct predictions
            number_batch = 0    ## number of batches seen this epoch

            for data in dataloader[phase]:
                images, labels = data
                if use_gpu:
                    images = images.cuda()
                    labels = labels.cuda()
                optimizer.zero_grad()
                ## only track gradients during the training phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(images)
                    loss = criterion(outputs, labels)
                    _, preds = torch.max(outputs, 1)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                running_loss += loss.item()
                running_accs += torch.sum(preds == labels).item()
                number_batch += 1

            ## average loss and accuracy over the epoch
            epoch_loss = running_loss / number_batch
            epoch_acc = running_accs / dataset_sizes[phase]

            ## log loss and accuracy for visualization
            if phase == 'train':
                writer.add_scalar('data/trainloss', epoch_loss, epoch)
                writer.add_scalar('data/trainacc', epoch_acc, epoch)
            else:
                writer.add_scalar('data/valloss', epoch_loss, epoch)
                writer.add_scalar('data/valacc', epoch_acc, epoch)

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

        ## step the LR schedule once per epoch, after the optimizer updates
        lr_scheduler.step()

    writer.close()
    return model
if __name__ == '__main__':
    crop_size = 48  ## input size fed to the network
    num_classes = 2
    model = simpleconv3(num_classes)  ## create the model
    data_dir = './data'
    if not os.path.exists('models'):
        os.mkdir('models')

    use_gpu = torch.cuda.is_available()
    if use_gpu:
        model = model.cuda()

    ## Data preprocessing: training uses random resized cropping, random horizontal
    ## flipping, and normalization; validation uses resizing, center cropping, and normalization
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(crop_size),  ## crop at a random size and aspect ratio, then rescale to 48x48
            transforms.RandomHorizontalFlip(),  ## flip the image horizontally with probability 0.5
            transforms.ToTensor(),  ## convert a PIL Image or numpy.ndarray to a torch.Tensor scaled to [0.0, 1.0]
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])  ## normalize each channel from [0, 1] to [-1, 1]
        ]),
        'val': transforms.Compose([
            transforms.Resize(64),  ## resize the shorter side to 64
            transforms.CenterCrop(crop_size),  ## crop out the central 48x48 region
            transforms.ToTensor(),  ## convert a PIL Image or numpy.ndarray to a torch.Tensor scaled to [0.0, 1.0]
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])  ## normalize each channel from [0, 1] to [-1, 1]
        ]),
    }

    ## Build the datasets and dataloaders for the train and val splits
    image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                              data_transforms[x]) for x in ['train', 'val']}
    dataloader = {x: DataLoader(
        dataset=image_datasets[x],
        batch_size=13,
        shuffle=True,
        num_workers=4,
    ) for x in ['train', 'val']}
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    criterion = nn.CrossEntropyLoss()  ## cross-entropy loss for classification
    optimizer_ft = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)  ## SGD with momentum
    exp_lr_scheduler = StepLR(optimizer_ft, step_size=100, gamma=0.1)  ## decay the LR by 10x every 100 epochs
    model = train(model=model,
                  criterion=criterion,
                  optimizer=optimizer_ft,
                  lr_scheduler=exp_lr_scheduler,
                  epochs=300)

    torch.save(model.state_dict(), 'models/model.pt')  ## save the trained weights
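The script relies on torchvision's ImageFolder convention: under ./data, each split directory contains one subfolder per class, and the subfolder names define the labels. The class folder names below are placeholders; only the train/val split names and the one-folder-per-class layout matter.

data/
  train/
    0/    ## one subfolder per class; images of that class go inside
    1/
  val/
    0/
    1/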

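The net module that provides simpleconv3 is not shown in this post. As a reference point only, here is a minimal sketch of what such a three-convolution network could look like for the 48x48 RGB crops used above; all layer widths are assumptions, not the author's actual definition.

## Hypothetical sketch of net.py -- layer sizes are assumptions chosen
## to match the 48x48 RGB input produced by the transforms above.
import torch.nn as nn
import torch.nn.functional as F

class simpleconv3(nn.Module):
    def __init__(self, num_classes):
        super(simpleconv3, self).__init__()
        self.conv1 = nn.Conv2d(3, 12, 3, 2)   ## 48x48 -> 23x23
        self.bn1 = nn.BatchNorm2d(12)
        self.conv2 = nn.Conv2d(12, 24, 3, 2)  ## 23x23 -> 11x11
        self.bn2 = nn.BatchNorm2d(24)
        self.conv3 = nn.Conv2d(24, 48, 3, 2)  ## 11x11 -> 5x5
        self.bn3 = nn.BatchNorm2d(48)
        self.fc1 = nn.Linear(48 * 5 * 5, 128)
        self.fc2 = nn.Linear(128, num_classes)

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = x.view(-1, 48 * 5 * 5)  ## flatten for the fully connected layers
        x = F.relu(self.fc1(x))
        return self.fc2(x)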
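Once training has run, the curves logged by SummaryWriter can be viewed with tensorboard --logdir logs. To use the saved weights afterwards, rebuild the model and load the state dict. The sketch below is assumed usage, not part of the original post: it reuses net.simpleconv3, applies the same val-time preprocessing, and classifies a single image at a hypothetical path test.jpg.

## Minimal inference sketch (assumptions: net.simpleconv3 as above, test.jpg exists)
import torch
from torchvision import transforms
from PIL import Image
from net import simpleconv3

model = simpleconv3(2)
model.load_state_dict(torch.load('models/model.pt', map_location='cpu'))
model.eval()

## same preprocessing as the 'val' transform in the training script
preprocess = transforms.Compose([
    transforms.Resize(64),
    transforms.CenterCrop(48),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
])

image = Image.open('test.jpg').convert('RGB')  ## hypothetical test image path
inputs = preprocess(image).unsqueeze(0)        ## add the batch dimension
with torch.no_grad():
    outputs = model(inputs)
    pred = torch.max(outputs, 1)[1].item()
print('predicted class:', pred)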