EEGNet复现

一、前言

本篇文章记录本人复现EEGNet的过程,文章包含三个文件(模块):脑电数据预处理模块、EEGNet模型构建模块和EEGNet训练模块。

本次实验使用的数据集是 BCI Competition IV Data 2a 数据集

应用的库:mne、torch、torchvision、scipy、numpy、sklearn

二、正文

1.数据预处理

先展示一下编写的数据预处理文件

import mne
import numpy as np
import torch
from sklearn.preprocessing import StandardScaler,OneHotEncoder
import scipy.io
import torchvision.transforms as transforms

# Select the GPU when one is available; tensors/models below are moved to it.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

"""进行预处理前请检查数据是否存在过多的噪声,或者数据是否稳定"""
# NOTE(review): the bare string above is a no-op statement kept verbatim; it
# advises checking the raw recordings for excessive noise/instability before
# running the preprocessing below.

# 进行数据预处理 只适合T结尾的data数据,其他的需要修改函数 filename需以.npy结尾
# Epoch a BCI IV-2a "T" (training session) GDF recording and save it as .npy.
# Only suitable for files whose name ends with T; other sessions need changes.
# save_filename should end with .npy.
def transform_save_data(filename, save_filename):
    """Read a GDF file, band-pass filter, epoch the four MI cues, save array.

    The saved array has shape (n_trials, 1, 22, 1000): one pseudo-image per
    trial of 22 EEG channels x 1000 samples (4 s window, last point dropped).
    """
    gdf = mne.io.read_raw_gdf(filename)
    print(gdf.info['ch_names'])
    all_events, _ = mne.events_from_annotations(gdf)
    # The three EOG channels are not used for classification.
    gdf.info['bads'] += ['EOG-left', 'EOG-central', 'EOG-right']
    # Motor-imagery window: 2-6 s relative to each cue.
    start_s, stop_s = 2, 6
    # Cue annotations 769-772 are the four motor-imagery classes.
    mi_event_id = {'769': 7, '770': 8, '771': 9, '772': 10}
    # Filtering requires the raw data loaded into memory first.
    gdf.load_data()
    gdf.filter(7.0, 35.0, fir_design='firwin')
    eeg_picks = mne.pick_types(gdf.info, meg=False, eeg=True, stim=False, exclude='bads')
    mi_epochs = mne.Epochs(raw=gdf, events=all_events, event_id=mi_event_id,
                           tmin=start_s, tmax=stop_s, preload=True,
                           baseline=None, picks=eeg_picks)
    trials = mi_epochs.get_data()
    # Drop the final sample so every trial is exactly 1000 points long.
    trials = trials[:, :, :-1]
    np.save(save_filename, trials.reshape(trials.shape[0], 1, 22, 1000))


# 进行归一化处理
# Standardize the epoched EEG data and one-hot encode the class labels.
def data_processing(BCI_IV_2a_data,label_filename):
    """Z-score the trials and load/encode the labels from a .mat file.

    Parameters
    ----------
    BCI_IV_2a_data : np.ndarray, shape (n_trials, 1, 22, 1000)
        Epoched EEG data as produced by transform_save_data.
    label_filename : str
        Path to the matching BCI IV-2a .mat file with a 'classlabel' array.

    Returns
    -------
    (np.ndarray, np.ndarray)
        Scaled data (n_trials, 1, 22, 1000) and one-hot labels
        (n_trials - 1, n_classes) — note the length mismatch below.
    """
    Scaler = StandardScaler()
    # Flatten each trial to a 22 * 1000 = 22000-feature vector; StandardScaler
    # then z-scores each feature column across trials.
    X_train = BCI_IV_2a_data.reshape(BCI_IV_2a_data.shape[0], 22000)
    X_train_Scaler = Scaler.fit_transform(X_train)
    # Reshape back to (trials, 1, channels, samples).
    acc_train = X_train_Scaler.reshape(BCI_IV_2a_data.shape[0], 1, 22, 1000)
    data_label = scipy.io.loadmat(label_filename)
    Label = data_label['classlabel']
    # Number of distinct classes (expected to print 4 for BCI IV-2a).
    n_classes = len(np.unique(Label))
    print(n_classes)
    encoder = OneHotEncoder(handle_unknown='ignore')
    y = np.array(Label)
    y_oh = encoder.fit_transform(y).toarray()
    # NOTE(review): this drops the LAST label row, so y_oh has one fewer row
    # than acc_train (287 vs 288 trials). The __main__ script appears to
    # compensate manually elsewhere — confirm this is intentional.
    y_oh = y_oh[:-1]

    return acc_train,y_oh


# 进行转换成Tensor格式的数据  保存的文件的格式应该以pt为后缀
def data_transform_tensor(acc_train,y_oh,save_datafilename,save_labelfilename):
    transf = transforms.ToTensor()
    d = transf(y_oh)
    # 去除另外四个维度的标签,标签就是最大值
    label = torch.argmax(d,dim=2).long()

    

    h = torch.squeeze(label)
    data = torch.tensor(acc_train,dtype=torch.float32)
    labels = torch.tensor(h,dtype=torch.long)

    torch.save(data,save_datafilename)
    torch.save(labels,save_labelfilename)



# 将数据进行联合
# Merge several EEG datasets (e.g. original + augmented) into single files.
def combine_data(data_list, label_list, data_filename, label_filename):
    """Concatenate EEG tensors along the trial axis and persist them as .pt.

    Parameters
    ----------
    data_list : list of torch.Tensor
        EEG data tensors to concatenate along dim 0.
    label_list : list of torch.Tensor
        Matching label tensors, concatenated the same way.
    data_filename : str
        Output path for the combined data file.
    label_filename : str
        Output path for the combined label file.
    """
    merged_data = torch.cat(data_list, dim=0)
    merged_labels = torch.cat(label_list, dim=0)
    torch.save(merged_data, data_filename)
    torch.save(merged_labels, label_filename)




# 进行时域上EEG数据增强  通过分割,重构 打乱数据

# Time-domain EEG augmentation: build synthetic trials by splicing together
# random 125-sample segments drawn from trials of the same class.
def interaug(timg, label, batch_size):
    """Generate ``batch_size`` augmented trials (batch_size // 4 per class).

    Fixes over the original: results are moved to the GPU only when one is
    available (the hard-coded ``.cuda()`` crashed on CPU-only machines), and
    the inner loop now draws one random source-trial index per segment instead
    of drawing eight and discarding seven.

    Parameters
    ----------
    timg : np.ndarray, shape (n_trials, 1, 22, 1000)
        Source EEG trials.
    label : np.ndarray, shape (n_trials,)
        Integer class labels in {0, 1, 2, 3}.
    batch_size : int
        Total number of augmented trials; should be divisible by 4.

    Returns
    -------
    (torch.FloatTensor, torch.LongTensor)
        Shuffled augmented data and labels on the selected device.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    per_class = int(batch_size / 4)
    aug_data = []
    aug_label = []
    for cls4aug in range(4):
        # Select the trials belonging to this class.
        cls_idx = np.where(label == cls4aug)
        tmp_data = timg[cls_idx]
        tmp_label = label[cls_idx]
        tmp_aug_data = np.zeros((per_class, 1, 22, 1000))
        for ri in range(per_class):
            # Each synthetic trial is 8 segments of 125 samples, each copied
            # from a randomly chosen same-class trial.
            for rj in range(8):
                rand_idx = np.random.randint(0, tmp_data.shape[0])
                tmp_aug_data[ri, :, :, rj * 125:(rj + 1) * 125] = \
                    tmp_data[rand_idx, :, :, rj * 125:(rj + 1) * 125]

        aug_data.append(tmp_aug_data)
        aug_label.append(tmp_label[:per_class])
    aug_data = np.concatenate(aug_data)
    aug_label = np.concatenate(aug_label)
    # Shuffle data and labels with the same permutation.
    aug_shuffle = np.random.permutation(len(aug_data))
    aug_data = aug_data[aug_shuffle]
    aug_label = aug_label[aug_shuffle]

    aug_data = torch.from_numpy(aug_data).float().to(device)
    aug_label = torch.from_numpy(aug_label).long().to(device)
    return aug_data, aug_label



# 切分部分数据进行test
# Carve off the first 100 trials as a quick-look test subset.
def split_EEGdata(data, label):
    """Save the first 100 trials, reshaped to (N, 1, 22, 1000), to fixed files.

    Writes 'EEG_data_split.pt' and 'EEG_label_split.pt' in the working dir.
    """
    reshaped = data.view(data.shape[0], 1, 22, 1000)
    subset = reshaped[:100]
    subset_labels = label[:100]
    torch.save(subset, 'EEG_data_split.pt')
    torch.save(subset_labels, 'EEG_label_split.pt')



# Variant of transform_save_data that epochs the recording WITHOUT the
# 7-35 Hz band-pass filter (otherwise identical pipeline and output shape).
def transform_save_data_version2(filename, save_filename):
    """Read a GDF file, epoch the four MI cues unfiltered, save as .npy.

    Output shape: (n_trials, 1, 22, 1000).
    """
    gdf = mne.io.read_raw_gdf(filename)
    print(gdf.info['ch_names'])
    all_events, _ = mne.events_from_annotations(gdf)
    # EOG channels are excluded from the picks below.
    gdf.info['bads'] += ['EOG-left', 'EOG-central', 'EOG-right']
    # Motor-imagery window: 2-6 s relative to each cue.
    start_s, stop_s = 2, 6
    # Cue annotations 769-772 are the four motor-imagery classes.
    mi_event_id = {'769': 7, '770': 8, '771': 9, '772': 10}
    gdf.load_data()
    eeg_picks = mne.pick_types(gdf.info, meg=False, eeg=True, stim=False, exclude='bads')
    mi_epochs = mne.Epochs(raw=gdf, events=all_events, event_id=mi_event_id,
                           tmin=start_s, tmax=stop_s, preload=True,
                           baseline=None, picks=eeg_picks)
    trials = mi_epochs.get_data()
    # Drop the final sample so every trial is exactly 1000 points long.
    trials = trials[:, :, :-1]
    np.save(save_filename, trials.reshape(trials.shape[0], 1, 22, 1000))



if __name__ == '__main__':
    # Driver script. The commented-out lines record the one-off pipeline that
    # produced the .pt files on disk: GDF -> epoched .npy -> scaled tensors ->
    # time-domain augmentation -> concatenation. Only the final combine step
    # and a reload check are live; all paths are machine-specific.
    # filename = 'C:\\Users\\24242\\Desktop\\AI_Reference\\data_bag\\BCICIV_2a_gdf\\A06T.gdf'
    # save_filename = 'A06T.npy'
    # transform_save_data(filename,save_filename)
    # data = np.load('C:\\Users\\24242\\DataspellProjects\\EEG_Project\\EEGNet\\A06T.npy')
    # label_filename = 'C:\\Users\\24242\\Desktop\\AI_Reference\\data_bag\\BCICIV_2a_gdf\\A06T.mat'
    # acc_train,y_oh = data_processing(data,label_filename)
    # save_datafilename = 'A06T.pt'
    # save_labelfilename = 'A06T_target.pt'
    # data_transform_tensor(acc_train,y_oh,save_datafilename,save_labelfilename)
    # Concatenate datasets so the model does not train on too few samples.

    # data1 = np.load('C:\\Users\\24242\\DataspellProjects\\EEG_Project\\EEGNet\\A01T.npy')
    # data2 = torch.load('C:\\Users\\24242\\DataspellProjects\\EEG_Project\\EEGNet\\A02T.pt')
    # data3 = torch.load('C:\\Users\\24242\\DataspellProjects\\EEG_Project\\EEGNet\\A03T.pt')
    # data5 = torch.load('C:\\Users\\24242\\DataspellProjects\\EEG_Project\\EEGNet\\A05T.pt')
    # label1 = torch.load('C:\\Users\\24242\\DataspellProjects\\EEG_Project\EEGNet\\A01T_target.pt')
    # label1 = label1[:-1]
    # label2 = torch.load('C:\\Users\\24242\\DataspellProjects\\EEG_Project\EEGNet\\A02T_target.pt')
    # label2 = label2[:-1]
    # label3 = torch.load('C:\\Users\\24242\\DataspellProjects\\EEG_Project\EEGNet\\A03T_target.pt')
    # label3 = label3[:-1]
    # label1 = label1.numpy()
    # label5 = torch.load('C:\\Users\\24242\\DataspellProjects\\EEG_Project\EEGNet\\A05T_target.pt')

    # data_list = [data1,data2,data3,data5]
    # label_list = [label1,label2,label3,label5]
    # data_filename = 'combine_data_01.pt'
    # label_filename = 'combine_label_01.pt'
    # label1 = label1[:-1]
    #
    # aug_data, aug_label = interaug(timg=data1,label=label1,batch_size=287)
    #
    # data_1 = torch.load('C:\\Users\\24242\\DataspellProjects\\EEG_Project\EEGNet\\A01T.pt')
    # label_1 = torch.load('C:\\Users\\24242\\DataspellProjects\\EEG_Project\EEGNet\\A01T_target.pt')
    # print(label_1.shape)
    # print(data_1.shape)
    # print(aug_data.shape)
    # print(aug_label.shape)
    # data_list = [data_1,aug_data]
    # label_list = [label_1,aug_label]
    # combine_data(data_list=data_list,label_list=label_list,data_filename=data_filename,label_filename=label_filename)
    print('------------------new FileProcess------------------')
    # filename = 'C:\\Users\\24242\\Desktop\\AI_Reference\\data_bag\\BCICIV_2a_gdf\\A01T.gdf'
    # save_filename = 'A01T_new_original.npy'
    # # transform_save_data_version2(filename, save_filename)
    # data = np.load(save_filename)
    #
    # label_name = 'C:\\Users\\24242\\Desktop\\AI_Reference\\data_bag\\BCICIV_2a_gdf\\A01T.mat'
    # acc_train, y_oh = data_processing(data,label_name)
    #
    # save_dataFilename = 'A01T_new_original.pt'
    # save_labelFilename = 'A01T_new_label_original.pt'
    # # data_transform_tensor(acc_train, y_oh, save_dataFilename, save_labelFilename)
    # Load the base tensors and two previously augmented sets onto the CPU.
    data_final = torch.load('A01T_new_original.pt').to('cpu')
    label_final = torch.load('A01T_new_label_original.pt').to('cpu')
    # data_final_01 = torch.load('A01T_new_data_aug.pt').to('cpu')
    # label_final_01 = torch.load('A01T_new_label_aug.pt').to('cpu')
    data_final_02 = torch.load('A01T_new_data_aug_01.pt').to('cpu')
    label_final_02 = torch.load('A01T_new_label_aug_01.pt').to('cpu')
    data_final_03 = torch.load('A01T_new_data_aug_02.pt').to('cpu')
    label_final_03 = torch.load('A01T_new_label_aug_02.pt').to('cpu')

    # data_final = data_final.numpy().astype('float32')
    # label_final = label_final.numpy().astype('float32')
    # print(label_final)
    # split_EEGdata(data_final, label_final)
    # aug_data, aug_label = interaug(data_final, label_final, batch_size=48)
    # print(aug_label)
    # torch.save(aug_data, 'A01T_new_data_aug_03.pt')
    # torch.save(aug_label, 'A01T_new_label_aug_03.pt')
    # Merge the two augmented sets with the originals and save the result.
    data_list = [data_final_02, data_final_03, data_final]
    label_list = [label_final_02, label_final_03, label_final]
    data_combine_filename = 'A01T_new_combine.pt'
    data_combine_label_filename = 'A01T_new_label_combine.pt'
    combine_data(data_list, label_list, data_combine_filename, data_combine_label_filename)
    # Reload as a sanity check that the combined files were written.
    label = torch.load('A01T_new_label_combine.pt')
    data = torch.load('A01T_new_combine.pt')
    # print(label)

大部分代码都编写了注释,数据经过预处理后,该模块可以直接应用。

保存shape为(288,1,22,1000)的数据到文件中进行备用 

数据shape的原始格式为(trial,1,channel,sample)

2.模型构建

先上代码

# 导入工具包
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary


class EEGNet(nn.Module):
    """Compact convolutional network for EEG classification (EEGNet).

    Input shape: (batch, 1, in_chans, samples). The defaults match BCI IV-2a
    recordings (22 channels, 1000 samples, dropout 0.25) and keep the module
    attribute names — and therefore state_dict keys — identical to the
    original implementation, so previously saved checkpoints still load.

    Parameters
    ----------
    classes_num : int
        Number of output classes.
    in_chans : int, optional
        Number of EEG channels (height of the input), default 22.
    samples : int, optional
        Number of time samples per trial (width of the input), default 1000.
    dropout_rate : float, optional
        Dropout probability used in blocks 2 and 3, default 0.25.
    """

    def __init__(self, classes_num, in_chans=22, samples=1000, dropout_rate=0.25):
        super(EEGNet, self).__init__()
        self.drop_out = dropout_rate

        # Block 1: temporal convolution — 8 temporal filters over the samples.
        self.block_1 = nn.Sequential(
            # ZeroPad2d order is (left, right, top, bottom): pad only in time.
            nn.ZeroPad2d((8, 8, 0, 0)),
            nn.Conv2d(
                in_channels=1,            # input shape (1, C, T)
                out_channels=8,           # number of temporal filters
                kernel_size=(1, 16),      # 16-sample temporal kernel
                bias=False
            ),                            # output shape (8, C, T+1)
            nn.BatchNorm2d(8)
        )

        # Block 2: depthwise spatial convolution — 2 spatial filters per
        # temporal filter (groups=8 keeps each temporal filter separate).
        self.block_2 = nn.Sequential(
            nn.Conv2d(
                in_channels=8,
                out_channels=16,
                kernel_size=(in_chans, 1),  # spans all EEG channels at once
                groups=8,
                bias=False
            ),                              # output shape (16, 1, T+1)
            nn.BatchNorm2d(16),
            nn.ELU(),
            nn.AvgPool2d((1, 4)),           # downsample time by 4
            nn.Dropout(self.drop_out)
        )

        # Block 3: separable convolution = depthwise temporal conv followed
        # by a 1x1 pointwise conv that mixes the feature maps.
        self.block_3 = nn.Sequential(
            nn.ZeroPad2d((8, 8, 0, 0)),
            nn.Conv2d(
                in_channels=16,
                out_channels=16,
                kernel_size=(1, 16),
                groups=16,                  # depthwise: one filter per map
                bias=False
            ),
            nn.Conv2d(
                in_channels=16,
                out_channels=16,
                kernel_size=(1, 1),         # pointwise mixing
                bias=False
            ),
            nn.BatchNorm2d(16),
            nn.ELU(),
            nn.AvgPool2d((1, 8)),           # downsample time by another 8
            nn.Dropout(self.drop_out)
        )

        # Remaining time width after both poolings; equals 31 for samples=1000,
        # which reproduces the original hard-coded Linear(16 * 31, ...).
        feature_width = ((samples + 1) // 4 + 1) // 8
        self.out = nn.Linear(16 * feature_width, classes_num)

    def forward(self, x):
        x = self.block_1(x)
        x = self.block_2(x)
        x = self.block_3(x)
        x = x.view(x.size(0), -1)   # flatten per-sample features
        # Returns raw logits; pair with CrossEntropyLoss (softmax is implied).
        return self.out(x)
if __name__ == '__main__':
    # Smoke test: push a random batch through the network and print a summary.
    # Use CUDA only when it is actually available — the original hard-coded
    # 'cuda' and crashed on CPU-only machines. Also renamed `input`, which
    # shadowed the builtin.
    run_device = 'cuda' if torch.cuda.is_available() else 'cpu'
    sample = torch.randn(32, 1, 22, 1000).to(device=run_device)

    model = EEGNet(4).to(device=run_device)
    summary(model, input_size=(1, 22, 1000), batch_size=32, device=run_device)

下面是论文中的图片

本次模型生成了8个时间滤波器,对于每个时间滤波器生成2个空间滤波器即16个空间滤波器

原论文提到dropout设置为0.25效果较好,且另外一种结构是生成4个时间滤波器,对于每个时间滤波器生成2个空间滤波器,大家可以自行测试

3.模型训练

import torch.nn
import torchvision.transforms as transforms
from torch import optim
from torch.utils.data import DataLoader
from EEGNet_pytorch_version import *
import pandas as pd

 

# Load the preprocessed EEG dataset (data + target tensors saved by the
# preprocessing module). Paths are machine-specific.
EEGnetdata = EEGNetDataset(file_path ='C:\\Users\\24242\\DataspellProjects\\EEG_Project\\EEGNet\\A03T.pt',
                            target_path ='C:\\Users\\24242\\DataspellProjects\\EEG_Project\\EEGNet\\A03T_target.pt',
                            transform=False,target_transform=False)

# shuffle=True so every epoch visits the trials in a fresh order — the
# original used shuffle=False, which hurts SGD-style training.
train_dataloader = DataLoader(EEGnetdata, shuffle=True, num_workers=0,
                              batch_size=Config.train_batch_size, drop_last=True)

# Train on the GPU when one is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

# Number of motor-imagery classes in BCI IV-2a.
classnum = 4

# BUG FIX: EEGNet's constructor requires the number of output classes; the
# original called EEGNet() with no argument, which raises a TypeError.
net = EEGNet(classnum).to(device)

# Cross-entropy over integer class-index targets (targets must be long).
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(net.parameters(), lr=0.001)

counter = []            # global step index, for plotting
loss_history = []       # per-step loss values
accuracy_history = []   # per-step running training accuracy
iteration_number = 0
train_correct = 0       # correct predictions so far (running)
total = 0               # samples seen so far (running)

# Put layers with train/eval behavior (BatchNorm, Dropout) into train mode.
net.train()
for epoch in range(0, Config.train_number_epochs):
    for i, data in enumerate(train_dataloader, 0):
        # Each batch: item (batch, 1, 22, 1000) EEG trials, target (batch,) labels.
        item, target = data
        item, target = item.to(device), target.to(device)

        optimizer.zero_grad()
        output = net(item)
        loss = criterion(output, target)
        loss.backward()
        # Clip gradients to stabilise training.
        torch.nn.utils.clip_grad_norm_(net.parameters(), max_norm=1.0)
        optimizer.step()

        # Running training accuracy (the original wrapped this float in
        # np.array for no reason).
        predicted = torch.argmax(output, 1)
        train_correct += (predicted == target).sum().item()
        total += target.size(0)
        train_accuracy = train_correct / total

        if i % 10 == 0:  # log every 10 batches (not every 10 epochs)
            print("Epoch number {}\n Current Accuracy {}\n Current loss {}\n".format(
                epoch, train_accuracy, loss.item()))
        iteration_number += 1
        counter.append(iteration_number)
        accuracy_history.append(train_accuracy)
        loss_history.append(loss.item())

show_plot(counter, accuracy_history, loss_history)


# Save only the learned parameters (preferred) ...
# torch.save(net.state_dict(),"The train_new_EEGNet.ph")
# ... or the whole model object.
# torch.save(net, "The train_new_EEGNet_A03.ph")

三、总结

闲下来水一篇文章,本篇文章只是一个小demo,是本人模型的复现记录,并没有进行测试,大家可以跑一跑模型,继续改进一下

  • 5
    点赞
  • 20
    收藏
    觉得还不错? 一键收藏
  • 11
    评论
EEGNet是一种用于脑电信号分类的深度神经网络模型。下面是使用MATLAB实现EEGNet模型的代码示例,用于二分类任务。 首先,我们需要导入必要的MATLAB工具箱和数据集。在本示例中,我们使用了一个名为BCICIV_2a_gdf的公共EEG数据集,并将其分为训练集和测试集。 ```matlab % Load necessary toolboxes addpath(genpath('DeepLearnToolbox-master')); addpath(genpath('EEGNet-master')); % Load dataset [X_train, y_train, X_test, y_test] = load_BCICIV_2a_gdf(); ``` 接下来,我们定义EEGNet模型的结构。EEGNet模型由两个主要部分组成:一个深度卷积层和一个平均池化层。这个结构可以使用MATLAB的网络设计工具箱进行定义。 ```matlab % Define EEGNet model structure inputSize = [1 22 1000]; numFilters = 8; filterSize = [1 5]; poolSize = [1 2]; numClasses = 2; layers = [ imageInputLayer(inputSize) convolution2dLayer(filterSize, numFilters, 'Padding', 'same') batchNormalizationLayer clippedReluLayer averagePooling2dLayer(poolSize, 'Stride', 2) convolution2dLayer(filterSize, numFilters * 2, 'Padding', 'same') batchNormalizationLayer clippedReluLayer averagePooling2dLayer(poolSize, 'Stride', 2) fullyConnectedLayer(numClasses) softmaxLayer classificationLayer]; options = trainingOptions('adam', ... 'InitialLearnRate',0.01, ... 'MaxEpochs',50, ... 'MiniBatchSize',64, ... 'Shuffle','every-epoch', ... 'ValidationData',{X_test, y_test}, ... 'ValidationFrequency',10, ... 'Verbose',false, ... 'Plots','training-progress'); ``` 现在我们已经定义了EEGNet模型的结构,接下来我们需要将其编译并在训练集上进行训练。 ```matlab % Compile and train EEGNet model net = trainNetwork(X_train, y_train, layers, options); ``` 最后,我们可以使用训练好的EEGNet模型对测试集进行测试,并计算准确率和混淆矩阵。 ```matlab % Test EEGNet model on test set y_pred = classify(net, X_test); accuracy = sum(y_pred == y_test) / numel(y_test); confusion_matrix = confusionmat(y_test, y_pred); ``` 这就是一个使用MATLAB实现EEGNet模型的示例代码。通过这个示例代码,我们可以看到EEGNet模型的结构和训练过程,并且可以使用MATLAB内置的工具箱进行实现。
评论 11
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值