深度学习算法多分类

多分类代码部分 

1.  导入必须要的包

import numpy as np
import torch
import torch.nn as nn
import copy
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as f
import torch.optim as optim

2.  定义多分类网络

class Classifier(nn.Module):
    """Simple feed-forward classifier: 3 sigmoid-activated hidden layers, linear output.

    Args:
        input_n: width of the input feature vector.
        output_n: number of output classes (default 39, matching the original script).

    Returns (forward): raw logits of shape (batch, output_n) — no softmax, since
    training uses nn.CrossEntropyLoss which applies log-softmax internally.
    """
    def __init__(self, input_n, output_n=39):
        super(Classifier, self).__init__()
        self.fc1 = nn.Linear(input_n, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 128)
        # Bug fix: the original hard-coded 39 here and silently ignored output_n.
        self.fc4 = nn.Linear(128, output_n)

    def forward(self, x):
        # torch.sigmoid replaces F.sigmoid, which is deprecated in modern PyTorch.
        x = torch.sigmoid(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))
        x = torch.sigmoid(self.fc3(x))
        x = self.fc4(x)
        return x

3.  定义有关Dataset, DataLoader的数据整理

class get_dataset(Dataset):
    """Dataset wrapping a numpy feature matrix and, optionally, integer labels.

    With labels, __getitem__ yields (float_tensor, long_label); without labels
    (test mode) it yields the float tensor alone.
    """
    def __init__(self, x, y=None):
        self.data = torch.from_numpy(x).float()
        # Labels are cast to int first so LongTensor conversion is well-defined
        # even when y arrives as a float array.
        self.target = None if y is None else torch.LongTensor(y.astype(int))

    def __getitem__(self, index):
        sample = self.data[index]
        if self.target is None:
            return sample
        return sample, self.target[index]

    def __len__(self):
        return self.data.shape[0]

def prep_dataloader(x, y, batch_size, mode):
    """Wrap (x, y) numpy arrays in a DataLoader; shuffle only when mode == 'train'."""
    shuffle = mode == 'train'
    wrapped = get_dataset(x, y)
    return DataLoader(wrapped, batch_size=batch_size, shuffle=shuffle)

4.  定义config

class config():
    """Bundle of the run's hyper-parameters, read by the train/test code."""
    def __init__(self):
        self.val_ratio = 0.2                 # fraction of training rows held out for validation
        self.device = torch.device('cpu')    # CPU only in this script
        self.seed = 0                        # RNG seed (declared but not applied anywhere here)
        self.batch_size = 64                 # DataLoader batch size
        self.n_epoch = 20                    # number of training epochs
        self.lr = 1e-4                       # Adam learning rate (== 0.0001)

5.  定义训练

def train(tr_set, dv_set, train_x, val_x, model, cfg):
    """Train `model` with Adam + cross-entropy, tracking validation accuracy.

    Args:
        tr_set / dv_set: iterables of (x, y) batches for training / validation.
        train_x / val_x: the raw feature arrays, used only for len() when
            computing per-sample accuracy in the progress printout.
        model: the network to train (updated in place).
        cfg: object with .device, .lr, and .n_epoch attributes.

    Returns:
        The deepcopy of the model snapshot with the best validation accuracy.
        Bug fix: the original returned None when dv_set was empty (or when
        validation accuracy never rose above zero), which crashed the caller's
        subsequent test() call — we now fall back to the trained model itself.
    """
    opt = optim.Adam(model.parameters(), lr=cfg.lr)
    # Hoisted out of the batch loop: the original rebuilt nn.CrossEntropyLoss()
    # for every batch, which is wasteful and obscures that the criterion is fixed.
    criterion = nn.CrossEntropyLoss()
    output_model = None
    best_acc = 0.0
    for epoch in range(cfg.n_epoch):
        train_acc = 0.0
        train_loss = 0.0
        val_acc = 0.0
        val_loss = 0.0
        model.train()  # training mode (matters if the model gains dropout/BN layers)
        for x, y in tr_set:
            x, y = x.to(cfg.device), y.to(cfg.device)
            pred = model(x)
            batch_loss = criterion(pred, y)
            _, train_pred = torch.max(pred, 1)  # predicted class = argmax over logits
            opt.zero_grad()
            batch_loss.backward()
            opt.step()

            train_acc += (train_pred.cpu() == y.cpu()).sum().item()
            train_loss += batch_loss.item()

        if len(dv_set) > 0:
            model.eval()  # bug fix: validation must run in eval mode
            with torch.no_grad():
                for x, y in dv_set:
                    x, y = x.to(cfg.device), y.to(cfg.device)
                    pred = model(x)
                    batch_loss = criterion(pred, y)
                    _, val_pred = torch.max(pred, 1)
                    val_acc += (val_pred.cpu() == y.cpu()).sum().item()
                    val_loss += batch_loss.item()

                if val_acc > best_acc:
                    best_acc = val_acc
                    # Snapshot the best model so later epochs can't degrade it.
                    output_model = copy.deepcopy(model)
                    print('epoch : %d, train_acc : %f, train_loss : %f, val_acc : %f, val_loss : %f, update model'%(
                    epoch + 1, train_acc / len(train_x), train_loss / len(tr_set), val_acc / len(val_x), val_loss / len(dv_set)))
                else:
                    print('epoch : %d, train_acc : %f, train_loss : %f, val_acc : %f, val_loss : %f'%(
                    epoch + 1, train_acc / len(train_x), train_loss / len(tr_set), val_acc / len(val_x), val_loss / len(dv_set)))
        else:
            print('epoch : %d, train_acc : %f, train_loss : %f'%(epoch + 1, train_acc / len(train_x), train_loss / len(tr_set)))
    if output_model is None:
        # No validation set (or no snapshot was ever taken): return the trained model.
        output_model = model
    return output_model

6.  定义测试

def test(tt_set, model, device=None):
    """Run inference over `tt_set` and return predicted class indices.

    Args:
        tt_set: iterable of feature batches (no labels).
        model: trained classifier producing logits.
        device: torch device for inference. Defaults to None, in which case the
            module-level global `cfg.device` is used — this preserves the
            original behavior, which read the global `cfg` directly (a hidden
            dependency; pass `device` explicitly in new code).

    Returns:
        1-D numpy array of predicted class indices, one per test sample.
    """
    if device is None:
        # Backward-compat fallback to the script's global config object.
        device = cfg.device
    model.eval()  # ensure deterministic inference (dropout/BN off)
    preds = []
    for x in tt_set:
        x = x.to(device)
        with torch.no_grad():
            pred = model(x)
            _, pred = torch.max(pred, 1)  # class index = argmax over logits
            preds.append(pred.detach().cpu())
    preds = torch.cat(preds, dim = 0).numpy()
    return preds

7.  main函数以及运行结果

# Paths to the pre-extracted .npy feature/label files — TODO confirm these
# exist relative to the working directory before running.
tr_path = './data/train_11.npy'
tr_label_path = './data/train_label_11.npy'
tt_path = './data/test_11.npy'
# Load everything into memory as numpy arrays.
train_data = np.load(tr_path)
train_label = np.load(tr_label_path)
test_data = np.load(tt_path)
print('size of training data : ', train_data.shape)
print('size of testing data : ', test_data.shape)

cfg = config()
# Positional train/validation split: the last val_ratio fraction of the rows is
# held out. NOTE(review): no shuffling before the split — assumes the rows are
# not ordered by class; confirm against the data source.
percent = int(train_data.shape[0] * (1 - cfg.val_ratio))
train_x, train_y, val_x, val_y = train_data[:percent], train_label[:percent], train_data[percent:], train_label[percent:]
print('size of training data', train_x.shape)
print('size of validation data', val_x.shape)
# Only the 'train' loader shuffles (see prep_dataloader); test loader has no labels.
tr_set = prep_dataloader(train_x, train_y, cfg.batch_size, 'train')
dv_set = prep_dataloader(val_x, val_y, cfg.batch_size, 'val')
tt_set = prep_dataloader(test_data, None, cfg.batch_size, 'test')

# Input width comes from the feature dimension; 39 output classes.
model = Classifier(train_data.shape[1], 39).to(cfg.device)
output_model = train(tr_set, dv_set, train_x, val_x, model, cfg)

# Predicted class indices for the test set, as a numpy array.
preds = test(tt_set, output_model)

评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值