「Open Course Highlights」Learning and Implementing the LSTM (Long Short-Term Memory) Network

An LSTM is, at its core, still an RNN

The LSTM was proposed to address the vanishing-gradient problem in RNNs, and it can be viewed as a variant of the RNN. Compared with a plain RNN, each LSTM cell adds three gates: an input gate, a forget gate, and an output gate.
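
To make the gates concrete, here is a minimal sketch of one LSTM time step written against raw tensors. The names W, U and b are illustrative stand-ins (nn.LSTM stores the same parameters as weight_ih_l0, weight_hh_l0 and two bias vectors, stacked in input/forget/cell/output order), but the arithmetic is the standard formulation.

import torch

def lstm_cell_step(x_t, h_prev, c_prev, W, U, b):
    # One LSTM time step. W: (4*hidden, input), U: (4*hidden, hidden), b: (4*hidden,)
    gates = x_t @ W.T + h_prev @ U.T + b           # (batch, 4*hidden)
    i, f, g, o = gates.chunk(4, dim=-1)            # input, forget, candidate, output
    i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)
    g = torch.tanh(g)                              # candidate cell update
    c_t = f * c_prev + i * g                       # forget gate decays old memory, input gate admits new
    h_t = o * torch.tanh(c_t)                      # output gate controls what the cell exposes
    return h_t, c_t

# illustrative shapes matching the model below: 14 input features, 32 hidden units
batch, input_size, hidden = 4, 14, 32
x_t = torch.randn(batch, input_size)
h = c = torch.zeros(batch, hidden)
W = torch.randn(4 * hidden, input_size)
U = torch.randn(4 * hidden, hidden)
b = torch.zeros(4 * hidden)
h, c = lstm_cell_step(x_t, h, c, W, U, b)
print(h.shape, c.shape)  # torch.Size([4, 32]) torch.Size([4, 32])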

 
import torch.nn as nn
from torch.nn import functional as F
 
 
class LSTM(nn.Module):
    def __init__(self):
        super(LSTM, self).__init__()
        self.rnn = nn.LSTM(
            input_size=14,    # each sample is a length-14 feature vector
            hidden_size=32,
            num_layers=2,
            batch_first=True  # tensors are (batch, seq_len, features)
        )
        self.fc = nn.Linear(32, 2)  # two-class output

    def forward(self, x):
        # x: (batch, seq_len, 14); with the (N, 1, 14) data built in load_data,
        # seq_len is 1, so flattening the LSTM output yields (batch, 32)
        out, _ = self.rnn(x)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        # raw logits are returned: nn.CrossEntropyLoss applies log-softmax itself
        return out
 
 
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        # With a length-14 input, each kernel-5 conv (no padding) shrinks the
        # length by 4: 14 -> 10 -> 6 -> 2, so the flattened size is 128 * 2 = 256
        self.cnn = nn.Sequential(
            nn.Conv1d(1, 32, 5),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            nn.Conv1d(32, 64, 5),
            nn.BatchNorm1d(64),
            nn.Conv1d(64, 128, 5),
            nn.BatchNorm1d(128),
        )
        self.fc = nn.Linear(256, 2)

    def forward(self, x):
        # x: (batch, 1, 14) -> (batch, 128, 2)
        out = self.cnn(x)
        out = out.view(out.size(0), -1)  # flatten to (batch, 256)
        out = self.fc(out)
        return out
 
 
class FC(nn.Module):
    def __init__(self):
        super(FC, self).__init__()
        # Note: there are no activations between the Linear layers, so as
        # written this stack collapses to a single linear map from 14 to 2
        self.fc = nn.Sequential(
            nn.Linear(14, 128),
            nn.Linear(128, 256),
            nn.Linear(256, 128),
            nn.Linear(128, 2)
        )

    def forward(self, x):
        out = x.view(x.size(0), -1)  # (batch, 1, 14) -> (batch, 14)
        out = self.fc(out)
        return out
 
# from https://github.com/kenandaoerdect/one-dimensional-data-classification/blob/master/model.py
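
Before wiring these into a training loop, a quick shape check helps confirm that all three models accept the same (N, 1, 14) input that load_data in the script below produces, and that each emits two logits per sample:

import torch
import model

x = torch.randn(8, 1, 14)  # (batch, channel / seq_len of 1, 14 features)
for Net in (model.LSTM, model.CNN, model.FC):
    net = Net().eval()          # eval mode so BatchNorm uses its running stats
    with torch.no_grad():
        print(Net.__name__, net(x).shape)  # each prints torch.Size([8, 2])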
import numpy as np
import pandas as pd
import torch
import model
from torch.utils import data
from torch import optim, nn
from torch.nn import functional as F
 
 
def load_data(train_data, train_label, test_data, test_label):
    # (N, 14) -> (N, 1, 14): the extra axis is the Conv1d channel dim and,
    # for the LSTM, a sequence length of 1
    train_data = train_data[:, np.newaxis]
    train_label = np.squeeze(train_label, axis=1)  # (N, 1) -> (N,)
    train_data = torch.tensor(train_data, dtype=torch.float32)
    train_label = torch.tensor(train_label, dtype=torch.long)  # CrossEntropyLoss expects int64 class indices
    dataset = data.TensorDataset(train_data, train_label)
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=batchsize, shuffle=True)

    test_data = test_data[:, np.newaxis]
    test_label = np.squeeze(test_label, axis=1)
    test_data = torch.tensor(test_data, dtype=torch.float32)
    test_label = torch.tensor(test_label, dtype=torch.long)
    dataset_test = data.TensorDataset(test_data, test_label)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=batchsize, shuffle=False)
    return data_loader, test_loader
 
 
def train(epoch, train_loader):
    net.train()  # training-mode behaviour (e.g. BatchNorm batch statistics)
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = net(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 20 == 0:
            print('Epoch:[{}/{}] [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, epochs, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item()))
 
 
def evaluate(test_loader):
    net.eval()  # use BatchNorm running statistics during evaluation
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = net(data)
            # criterion returns the batch mean, so scale by the batch size
            # to accumulate a true per-sample sum
            test_loss += criterion(output, target).item() * data.size(0)
            # the predicted class is the index of the largest logit
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).cpu().sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
 
 
def run():
    train_loader, test_loader = load_data(train_data, train_label, test_data, test_label)
    for epoch in range(1, epochs + 1):  # range(1, epochs) would skip the last epoch
        train(epoch, train_loader)
        evaluate(test_loader)
 
 
if __name__ == '__main__':

    batchsize = 128
    epochs = 10000

    # each row of the data CSVs is assumed to hold 14 feature values;
    # each row of the label CSVs a single integer class label (0 or 1)
    train_data = np.array(pd.read_csv('data/train.csv', header=None))
    train_label = np.array(pd.read_csv('data/trainlabel.csv', header=None))
    test_data = np.array(pd.read_csv('data/test.csv', header=None))
    test_label = np.array(pd.read_csv('data/testlabel.csv', header=None))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # fall back to CPU if no GPU
    net = model.LSTM().to(device)  # swap in model.CNN() or model.FC() to compare
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
    criterion = nn.CrossEntropyLoss()

    run()
 
# from https://github.com/kenandaoerdect/one-dimensional-data-classification/blob/master/main.py
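
If you want to smoke-test the script without the original dataset, synthetic CSVs in the layout the code assumes (14 unnamed feature columns per data row, a single 0/1 label per label row, no header) can be generated as below; the file names match the ones hard-coded in the main block. Training on random labels will of course hover around chance accuracy; this only verifies that the pipeline runs end to end.

import os
import numpy as np
import pandas as pd

os.makedirs('data', exist_ok=True)
rng = np.random.default_rng(0)
for split, n in (('train', 200), ('test', 50)):
    pd.DataFrame(rng.normal(size=(n, 14))).to_csv(f'data/{split}.csv', header=False, index=False)
    pd.DataFrame(rng.integers(0, 2, size=(n, 1))).to_csv(f'data/{split}label.csv', header=False, index=False)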