【Gender prediction with a logistic regression model in PyTorch】

Task

Predict a person's gender from their height and weight.

Neural network

We use the simplest possible single-layer network: a logistic regression model.
(figure: single-layer logistic regression network)
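For reference, here is a sketch of the underlying math (the standard two-class softmax formulation of logistic regression; the notation is mine, not taken from the original figure). The single linear layer maps the input x = (height, weight) to two class scores,

    z = W·x + b,  with W ∈ R^(2×2) and b ∈ R^2

the class probabilities come from a softmax,

    p(y = k | x) = exp(z_k) / (exp(z_0) + exp(z_1)),  k ∈ {0, 1}

and training minimizes the cross-entropy loss −log p(y = label | x). In the code below this corresponds to nn.Linear(2, 2) plus nn.CrossEntropyLoss, which applies the softmax and the negative log-likelihood internally.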

import math
import os

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset

class SexDataset(Dataset):
    # Read the whole text file into memory once; each line is "height weight label"
    def __init__(self, txt, transform=None, target_transform=None):
        data = []
        with open(txt, 'r') as fh:
            for line in fh:
                words = line.strip().split()
                data.append((float(words[0]), float(words[1]), int(words[2])))
        self.data = data

    def __getitem__(self, index):
        # features: (height, weight) as a float tensor; target: the class index
        return torch.FloatTensor([self.data[index][0], self.data[index][1]]), self.data[index][2]

    def __len__(self):
        return len(self.data)
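
# Assumed format of gender/sex_train.txt and gender/sex_val.txt, inferred from the
# parsing above: one sample per line, "height weight label" separated by spaces,
# where label is the class index (0 or 1). Hypothetical example lines:
#   175.0 70.5 1
#   160.0 52.0 0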

class SexNet(nn.Module):
    def __init__(self):
        super(SexNet, self).__init__()
        # a single fully-connected layer: 2 inputs (height, weight) -> 2 class scores
        self.dense = nn.Sequential(
            nn.Linear(2, 2)
        )

    def forward(self, x):
        out = self.dense(x)  # one linear pass; raw logits, softmax is applied inside CrossEntropyLoss
        return out

def train():
    batchsize = 10  # load 10 samples per batch
    train_data = SexDataset(txt='gender/sex_train.txt')
    val_data = SexDataset(txt='gender/sex_val.txt')
    train_loader = DataLoader(dataset=train_data, batch_size=batchsize, shuffle=True)
    val_loader = DataLoader(dataset=val_data, batch_size=batchsize)

    model = SexNet()

    # Adam optimizer with an initial learning rate of 0.01 and weight decay;
    # the learning rate is decayed during training by the scheduler below
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=1e-3)
    # step schedule: multiply the learning rate by 0.1 at epochs 10 and 20
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [10, 20], 0.1)
    loss_func = nn.CrossEntropyLoss()  # cross-entropy loss

    os.makedirs('output', exist_ok=True)  # make sure the checkpoint directory exists

    # start training; epochs is the number of passes over the training set
    epochs = 100
    for epoch in range(epochs):
        # training-----------------------------------
        model.train()
        train_loss = 0
        train_acc = 0
        for batch, (batch_x, batch_y) in enumerate(train_loader):
            out = model(batch_x)  # batch_x: [batch, 2] features -> out: [batch, 2] class scores
            loss = loss_func(out, batch_y)
            train_loss += loss.item()
            pred = torch.max(out, 1)[1]
            train_correct = (pred == batch_y).sum()
            train_acc += train_correct.item()
            print('epoch: %2d/%d batch %3d/%d  Train Loss: %.3f, Acc: %.3f'
                  % (epoch + 1, epochs, batch, math.ceil(len(train_data) / batchsize),
                     loss.item(), train_correct.item() / len(batch_x)))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        scheduler.step()  # update the learning rate after each epoch
        print('Train Loss: %.6f, Acc: %.3f' % (train_loss / (math.ceil(len(train_data)/batchsize)),
                                               train_acc / (len(train_data))))

        # evaluation--------------------------------
        model.eval()
        eval_loss = 0
        eval_acc = 0
        for batch_x, batch_y in val_loader:
            out = model(batch_x)
            loss = loss_func(out, batch_y)
            eval_loss += loss.item()
            pred = torch.max(out, 1)[1]
            num_correct = (pred == batch_y).sum()
            eval_acc += num_correct.item()
        print('Val Loss: %.6f, Acc: %.3f' % (eval_loss / (math.ceil(len(val_data)/batchsize)),
                                             eval_acc / (len(val_data))))
        # save model --------------------------------
        if (epoch + 1) % 1 == 0:  # save a checkpoint after every epoch
            # torch.save(model, 'output/model_' + str(epoch+1) + '.pth')
            torch.save(model.state_dict(), 'output/params_' + str(epoch + 1) + '.pth')

if __name__ == '__main__':
    train()

Training results

(figure: training and validation results)
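To use a saved checkpoint for prediction, a minimal sketch along the following lines should work, assuming SexNet from the code above is importable. The checkpoint filename and the height/weight values are illustrative assumptions, and which class index maps to which gender depends on how the labels are encoded in the data files.

import torch

model = SexNet()
model.load_state_dict(torch.load('output/params_100.pth'))  # assumed: checkpoint from the last epoch
model.eval()

with torch.no_grad():
    x = torch.FloatTensor([[175.0, 70.0]])    # hypothetical sample: (height, weight)
    logits = model(x)
    probs = torch.softmax(logits, dim=1)      # convert the two class scores to probabilities
    pred = torch.argmax(probs, dim=1).item()  # predicted class index, i.e. the gender label
    print(pred, probs.squeeze().tolist())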
