Emotion Recognition from EEG (Electroencephalography) Signals, with Selected Course Project Defense Slides (Dataset Overview, Data Preprocessing and Model Training, Training Process)

This article presents a data split strategy, leave-one-subject-out (LOSO), for the SEED dataset using the EEGNet model. It covers data preprocessing, the model architecture, and the training pipeline: data loading, model training, and evaluation. The results compare performance under different split strategies.

Datasets

  • SEED
  • SEED-IV
  • DEAP

Data Split Strategies

  • Subject-dependent: train and test on data from the same subject
  • Subject-independent: train and test on data from different subjects
  • Leave-one-subject-out (LOSO): a subject-independent protocol in which one subject is held out for testing and all remaining subjects are used for training; see the sketch below
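A minimal sketch of how LOSO folds can be enumerated over subject indices (illustrative only, assuming 15 subjects as in SEED; `n_subjects` is a hypothetical name, not from the script below):

n_subjects = 15
for test_subject in range(n_subjects):
    train_subjects = [s for s in range(n_subjects) if s != test_subject]
    # train on train_subjects, evaluate on the held-out test_subject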

Models

  • EEGNet
  • DeepNet
  • ShallowConvNet
  • TimesNet
  • Conformer

Results Under Selected Split Strategies

[Figure omitted: performance comparison under different split strategies]

Leave-One-Subject-Out Strategy Code: EEGNet, SEED Dataset, DE


import csv
import os

# Select the visible GPU(s) before torch initializes CUDA
gpus = [0]
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, gpus))

import datetime
import random
import time

import numpy as np
import scipy.io
import torch
import torch.nn as nn
import torch.utils.data  # DataLoader / TensorDataset are referenced via the full path below
from sklearn.utils import shuffle  # shuffles data and labels in unison
 
class EEGNet(nn.Module):
    def __init__(self, nb_classes=3, Chans=62, Samples=200, dropoutRate=0.5,
                 kernLength=100, F1=12, D=2, F2=24, norm_rate=0.25, dropoutType='Dropout'):
        super(EEGNet, self).__init__()
        self.T = Samples  # number of time samples per window

        # Block 1
        self.conv1 = nn.Conv2d(1, F1, (1, kernLength), padding='same', bias=False)  # temporal convolution
        self.batchnorm1 = nn.BatchNorm2d(F1)
        # Depthwise convolution across all electrodes (collapses the channel axis to 1)
        self.depthwiseConv1 = nn.Conv2d(F1, F1 * D, (Chans, 1), groups=F1, bias=False)
        self.batchnorm2 = nn.BatchNorm2d(F1 * D)
        self.activation_block1 = nn.ELU()
        # Note: AvgPool2d(1, 6) means kernel_size=1, stride=6, i.e. plain
        # temporal downsampling by 6 rather than true average pooling
        self.pooling1 = nn.AvgPool2d(1, 6)
        if dropoutType == 'Dropout':
            self.dropout1 = nn.Dropout(p=dropoutRate)

        # Block 2
        # A plain (1, 24) convolution standing in for the separable convolution
        # of the original EEGNet (no grouped depthwise step here)
        self.separableConv1 = nn.Conv2d(F1 * D, F2, (1, 24), padding='same', bias=False)
        self.batchnorm3 = nn.BatchNorm2d(F2)
        self.activation_block2 = nn.ELU()
        self.pooling2 = nn.AvgPool2d(1, 12)  # kernel_size=1, stride=12: downsampling by 12
        self.dropout2 = nn.Dropout(p=dropoutRate)

        # Classifier: 72 = F2 * 3 time steps left after both downsampling stages
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(72, nb_classes)

    def forward(self, x):
        # Block 1
        x = self.conv1(x)
        x = self.batchnorm1(x)
        x = self.depthwiseConv1(x)
        x = self.batchnorm2(x)
        x = self.activation_block1(x)
        x = self.pooling1(x)
        x = self.dropout1(x)

        # Block 2
        x = self.separableConv1(x)
        x = self.batchnorm3(x)
        x = self.activation_block2(x)
        x = self.pooling2(x)
        x = self.dropout2(x)

        # Classifier
        x = self.flatten(x)
        x = self.fc1(x)
        return x
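# Shape walkthrough for the defaults above (editorial note, not in the original
# script): an input window of shape (B, 1, 62, 200) becomes (B, 12, 62, 200)
# after conv1, (B, 24, 1, 200) after the depthwise conv, (B, 24, 1, 34) after
# pooling1, (B, 24, 1, 3) after pooling2, and flattens to 72 features,
# matching fc1. A quick sanity check:
#     model = EEGNet()
#     print(model(torch.randn(8, 1, 62, 200)).shape)  # torch.Size([8, 3])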
 
 
def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


class ExP():
    def __init__(self):
        self.batch_size = 128
        self.n_epochs = 400
        self.c_dim = 3          # number of emotion classes
        self.lr = 0.001
        self.b1 = 0.5           # Adam beta1
        self.b2 = 0.999         # Adam beta2

        self.start_epoch = 0

        # Only the cross-entropy loss is used below; the L1/L2 criteria are
        # kept from the original script but unused
        self.criterion_l1 = torch.nn.L1Loss().cuda()
        self.criterion_l2 = torch.nn.MSELoss().cuda()
        self.criterion_cls = torch.nn.CrossEntropyLoss().cuda()

        self.model = EEGNet().cuda()
        # self.model = nn.DataParallel(self.model, device_ids=list(range(len(gpus))))

        # Print the total number of trainable parameters
        print(f'Total parameters: {count_parameters(self.model)}')

        self.data_list = []
        self.labels_list = []
 
    def load_mat(self, file_path):
        return scipy.io.loadmat(file_path)

    def shuffle_data(self):
        """Shuffles data and labels in unison."""
        self.data_list, self.labels_list = shuffle(self.data_list, self.labels_list)

    def process_segments(self, data, label, subject_idx):
        for i in range(1, 16):  # trials 1..15 within one session file
            suffix = f'eeg{i}'  # keys look like '<name>_eeg1' ... '<name>_eeg15'
            for key in data.keys():
                if key.endswith(suffix):
                    original_array = data[key]
                    if isinstance(original_array, np.ndarray):  # skip .mat metadata entries
                        # Slice non-overlapping 200-sample windows, skipping the
                        # first 1000 samples of each trial; the upper bound 37000
                        # keeps every window within the shortest trial
                        for start in range(1000, 37000 - 200 + 1, 200):
                            segment = original_array[:, start:start+200]
                            # SEED labels are -1/0/1; shift to 0/1/2 for CrossEntropyLoss
                            segment_label = label['label'][0][i-1] + 1
                            self.data_list[subject_idx].append(segment)
                            self.labels_list[subject_idx].append(segment_label)
                    break  # each suffix matches exactly one key; stop searching
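    # Editorial note on the windowing above: SEED's Preprocessed_EEG data is
    # downsampled to 200 Hz, so each 200-sample segment is a 1-second window.
    # range(1000, 36801, 200) yields 180 windows per trial; with 15 trials in
    # each of 3 sessions, every subject contributes 45 * 180 = 8100 segments.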
 
 
    def get_source_data(self):
        file_paths = [
            '../../SEED/Preprocessed_EEG/1_20131027.mat', '../../SEED/Preprocessed_EEG/1_20131030.mat', '../../SEED/Preprocessed_EEG/1_20131107.mat',
            '../../SEED/Preprocessed_EEG/2_20140404.mat', '../../SEED/Preprocessed_EEG/2_20140413.mat', '../../SEED/Preprocessed_EEG/2_20140419.mat',
            '../../SEED/Preprocessed_EEG/3_20140603.mat', '../../SEED/Preprocessed_EEG/3_20140611.mat', '../../SEED/Preprocessed_EEG/3_20140629.mat',
            '../../SEED/Preprocessed_EEG/4_20140621.mat', '../../SEED/Preprocessed_EEG/4_20140702.mat', '../../SEED/Preprocessed_EEG/4_20140705.mat',
            '../../SEED/Preprocessed_EEG/5_20140411.mat', '../../SEED/Preprocessed_EEG/5_20140418.mat', '../../SEED/Preprocessed_EEG/5_20140506.mat',
            '../../SEED/Preprocessed_EEG/6_20130712.mat', '../../SEED/Preprocessed_EEG/6_20131016.mat', '../../SEED/Preprocessed_EEG/6_20131113.mat',
            '../../SEED/Preprocessed_EEG/7_20131027.mat', '../../SEED/Preprocessed_EEG/7_20131030.mat', '../../SEED/Preprocessed_EEG/7_20131106.mat',
            '../../SEED/Preprocessed_EEG/8_20140511.mat', '../../SEED/Preprocessed_EEG/8_20140514.mat', '../../SEED/Preprocessed_EEG/8_20140521.mat',
            '../../SEED/Preprocessed_EEG/9_20140620.mat', '../../SEED/Preprocessed_EEG/9_20140627.mat', '../../SEED/Preprocessed_EEG/9_20140704.mat',
            '../../SEED/Preprocessed_EEG/10_20131130.mat', '../../SEED/Preprocessed_EEG/10_20131204.mat', '../../SEED/Preprocessed_EEG/10_20131211.mat',
            '../../SEED/Preprocessed_EEG/11_20140618.mat', '../../SEED/Preprocessed_EEG/11_20140625.mat', '../../SEED/Preprocessed_EEG/11_20140630.mat',
            '../../SEED/Preprocessed_EEG/12_20131127.mat', '../../SEED/Preprocessed_EEG/12_20131201.mat', '../../SEED/Preprocessed_EEG/12_20131207.mat',
            '../../SEED/Preprocessed_EEG/13_20140527.mat', '../../SEED/Preprocessed_EEG/13_20140603.mat', '../../SEED/Preprocessed_EEG/13_20140610.mat',
            '../../SEED/Preprocessed_EEG/14_20140601.mat', '../../SEED/Preprocessed_EEG/14_20140615.mat', '../../SEED/Preprocessed_EEG/14_20140627.mat',
            '../../SEED/Preprocessed_EEG/15_20130709.mat', '../../SEED/Preprocessed_EEG/15_20131016.mat', '../../SEED/Preprocessed_EEG/15_20131105.mat'
        ]
 
        label = self.load_mat('../../SEED/Preprocessed_EEG/label.mat')
 
        # data_list / labels_list are lists of lists: one inner list per subject
        self.data_list = [[] for _ in range(15)]  # 15 subjects
        self.labels_list = [[] for _ in range(15)]

        for subject_index, file_path in enumerate(file_paths):
            print(file_path)
            data = self.load_mat(file_path)
            # Every 3 consecutive files (sessions) belong to one subject
            subject_idx = subject_index // 3
            self.process_segments(data, label, subject_idx)

        # Convert each subject's data and labels to tensors
        for idx in range(15):
            self.data_list[idx] = np.array(self.data_list[idx], dtype=np.float32)
            self.labels_list[idx] = np.array(self.labels_list[idx], dtype=np.int64)

            # Add the singleton channel dimension expected by Conv2d:
            # (N, 62, 200) -> (N, 1, 62, 200)
            self.data_list[idx] = np.expand_dims(self.data_list[idx], axis=1)
            self.data_list[idx] = torch.tensor(self.data_list[idx])
            self.labels_list[idx] = torch.tensor(self.labels_list[idx])

            # Standardize with a single global mean/std per subject
            self.data_list[idx] = (self.data_list[idx] - torch.mean(self.data_list[idx])) / torch.std(self.data_list[idx])

        # Return per-subject data and labels
        return self.data_list, self.labels_list
    
    def reset_model(self):
        self.model = EEGNet().cuda()  # re-instantiate the model from scratch for each fold
 
    def train(self):
        all_data, all_labels = self.get_source_data()  # load data and labels for all subjects

        os.makedirs('results', exist_ok=True)  # make sure the output directory exists
        best_accuracies = []  # best test accuracy per held-out subject

        # Summary CSV recording the best test accuracy for each subject
        with open('results/EEG_EEGNet_SEED_Summary.csv', 'w', newline='') as summary_file:
            summary_writer = csv.writer(summary_file)
            summary_writer.writerow(['Subject', 'Best Test Accuracy'])

            for test_subject in range(15):  # hold out each subject once
                print(f"Starting test on subject {test_subject+1}")
                best_test_accuracy = 0.0

                # Training set: the 14 other subjects (14 * 8100 = 113,400 segments);
                # test set: the held-out subject's 8100 segments
                train_data = torch.cat([all_data[i] for i in range(15) if i != test_subject], 0)
                train_labels = torch.cat([all_labels[i] for i in range(15) if i != test_subject], 0)
                test_data = all_data[test_subject]
                test_labels = all_labels[test_subject]

                # Data loaders
                train_dataset = torch.utils.data.TensorDataset(train_data, train_labels)
                test_dataset = torch.utils.data.TensorDataset(test_data, test_labels)
                train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True)
                test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=self.batch_size, shuffle=False)

                # Reset the model and optimizer for this fold
                self.reset_model()
                optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, betas=(self.b1, self.b2))

                # Per-subject CSV with per-epoch metrics
                with open(f'results/Subject_{test_subject+1}.csv', 'w', newline='') as subject_file:
                    subject_writer = csv.writer(subject_file)
                    subject_writer.writerow(['Epoch', 'Train Loss', 'Train Accuracy', 'Test Loss', 'Test Accuracy', 'Epoch Duration(s)'])

                    for epoch in range(self.n_epochs):
                        epoch_start_time = time.time()

                        # Per-epoch running metrics
                        train_loss, train_corrects, train_total = 0, 0, 0
                        test_loss, test_corrects, test_total = 0, 0, 0

                        # Training
                        self.model.train()
                        for inputs, targets in train_loader:
                            inputs, targets = inputs.cuda(), targets.cuda()
                            optimizer.zero_grad()
                            outputs = self.model(inputs)
                            loss = self.criterion_cls(outputs, targets)
                            loss.backward()
                            optimizer.step()

                            train_loss += loss.item() * inputs.size(0)
                            _, preds = torch.max(outputs, 1)
                            train_corrects += torch.sum(preds == targets.data)
                            train_total += inputs.size(0)

                        avg_train_loss = train_loss / train_total
                        train_accuracy = train_corrects.double() / train_total

                        # Evaluation
                        self.model.eval()
                        with torch.no_grad():
                            for inputs, targets in test_loader:
                                inputs, targets = inputs.cuda(), targets.cuda()
                                outputs = self.model(inputs)
                                loss = self.criterion_cls(outputs, targets)

                                test_loss += loss.item() * inputs.size(0)
                                _, preds = torch.max(outputs, 1)
                                test_corrects += torch.sum(preds == targets.data)
                                test_total += inputs.size(0)

                        avg_test_loss = test_loss / test_total
                        test_accuracy = (test_corrects.double() / test_total).item()

                        epoch_duration = time.time() - epoch_start_time

                        # Log this epoch to the subject's CSV
                        subject_writer.writerow([epoch+1, avg_train_loss, train_accuracy.item(), avg_test_loss, test_accuracy, epoch_duration])

                        print(f'Subject {test_subject+1}, Epoch {epoch+1}: Train Loss: {avg_train_loss:.4f}, Train Acc: {train_accuracy:.4f}, Test Loss: {avg_test_loss:.4f}, Test Acc: {test_accuracy:.4f}, Duration: {epoch_duration:.2f}s')

                        # Track the best test accuracy for this fold
                        if test_accuracy > best_test_accuracy:
                            best_test_accuracy = test_accuracy

                # Record this subject's best test accuracy
                summary_writer.writerow([test_subject+1, best_test_accuracy])
                best_accuracies.append(best_test_accuracy)

        # Average the best accuracies across all held-out subjects
        average_accuracy = float(np.mean(best_accuracies))
        print(f'Average Accuracy: {average_accuracy:.4f}')

        return average_accuracy
 
 
 
 
def main():
    aver = 0
    n_runs = 1  # each run is a full leave-one-subject-out pass over all 15 subjects
    os.makedirs('results', exist_ok=True)
    result_write = open("./results/sub_result.txt", "w")

    for i in range(n_runs):
        starttime = datetime.datetime.now()

        # Seed everything for reproducibility
        seed_n = np.random.randint(2023)
        print('seed is ' + str(seed_n))
        random.seed(seed_n)
        np.random.seed(seed_n)
        torch.manual_seed(seed_n)
        torch.cuda.manual_seed(seed_n)
        torch.cuda.manual_seed_all(seed_n)

        print('Run %d' % (i + 1))
        exp = ExP()

        fold_accuracy = exp.train()
        print('The average accuracy across folds for run %d is: %.4f' % (i + 1, fold_accuracy))

        # Write per-run results
        result_write.write('Run ' + str(i + 1) + ': seed is ' + str(seed_n) + "\n")
        result_write.write('Run ' + str(i + 1) + ': average accuracy across folds is ' + str(fold_accuracy) + "\n")

        endtime = datetime.datetime.now()
        print('Run %d duration: ' % (i + 1) + str(endtime - starttime))
        aver += fold_accuracy  # accumulate across runs

    aver = aver / n_runs

    # Write the overall average
    result_write.write('The overall average accuracy across all runs is: ' + str(aver) + "\n")
    result_write.close()

if __name__ == "__main__":
    print(time.asctime(time.localtime(time.time())))
    main()
    print(time.asctime(time.localtime(time.time())))
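After a full run, the summary CSV written by `train()` can be aggregated offline. A minimal sketch, assuming the `results/EEG_EEGNet_SEED_Summary.csv` layout produced above:

import csv
import numpy as np

with open('results/EEG_EEGNet_SEED_Summary.csv') as f:
    rows = list(csv.reader(f))[1:]  # skip the header row
accs = [float(acc) for _, acc in rows]
print(f'Mean LOSO accuracy over {len(accs)} subjects: '
      f'{np.mean(accs):.4f} (std {np.std(accs):.4f})')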

Appendix: Selected PPT Slides

[Slide screenshots not reproduced]
