EEG Data Preprocessing

Main workflow of this article:

Load the raw EEG signal

Convert it into MNE objects

Compute the FFT frequency spectrum of channel FP2

Run a wavelet packet analysis on channel FP2

Python libraries used

# Import Python libraries
import mne
from mne.datasets import sample
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

%matplotlib inline

Some custom plotting helpers

from scipy.fftpack import fft, fftshift, ifft
from scipy.fftpack import fftfreq

# Amplitude spectrum plot
def show_fq_image(x, title):
    fs = 128                  # sampling rate (Hz)
    N = len(x)                # number of samples
    signalFFT = abs(fft(x, N))         # FFT magnitude
    Y = np.array(signalFFT) * 2 / N    # scale to the real amplitude
    f = np.array([i for i in range(N//2+1)]) * (fs/N)  # frequency axis, 0..fs/2
    print(f.shape, Y[0:N//2].shape)

    fig = plt.figure(figsize=(10, 6))  # figure size
    plt.plot(f[::10], Y[0:N//2+1:10])

#     plt.plot(f, Y[0:N//2+1])
    plt.ylabel('amp')
    plt.xlabel('frequency')
    plt.title(title)
    plt.grid()
    plt.show()
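
As a quick sanity check of this helper (a hypothetical example, not part of the original notebook), feeding it a pure 10 Hz sine sampled at 128 Hz should produce a single peak at 10 Hz:

# Hypothetical sanity check for show_fq_image: a pure 10 Hz sine sampled at
# fs = 128 Hz should show up as a single spectral peak at 10 Hz.
fs = 128
t = np.arange(0, 4, 1 / fs)            # 4 seconds of samples
test_sig = np.sin(2 * np.pi * 10 * t)
show_fq_image(test_sig, '10 Hz test sine')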

Load the Emotiv data and convert it to MNE format

# Load the Emotiv data and convert it to MNE format
'''
Convert the Emotiv CSV data into the MNE format.
input 
     file_  path of the CSV file relative to this script
return
    eeg_channel  channel names
    eeg_data     EEG data
    info         EEG recording metadata

'''
def load_data(file_):
    df = pd.read_csv(file_)
    info = df.head(0) # EEG information
    key = np.array(df.index[0]) #0-2 Timestamp,EEG.Counter,EEG.Interpolated, 3-34 EEG channel
    value = np.array([list(i) for i in df.index[1:] ],dtype='float')

    print('eeg_original_shape:',value.shape)

    eeg_channel = key[3:35]
    eeg_data = value[:,3:35]
    # print(eeg_data[0:20])

    print('eeg_channel:',eeg_channel.shape)
    print('eeg_data:',eeg_data.shape)
    print("eeg_info",info)
    print(eeg_channel)
    
    
    med_row = np.median(eeg_data,axis=1)
    print(med_row.shape)

    # subtract the per-sample median across channels
    filtedData2 = eeg_data - np.matrix(med_row).T
    print(filtedData2.shape)

    # print(eeg_data[0:20])

    # Limit the slew rate: clamp the sample-to-sample difference to [-15, 15]
    for j in range(1, filtedData2.shape[0]): 
        del_ = filtedData2[j,:] - filtedData2[j-1,:]
        del_ = [min(15, x) for x in np.squeeze(del_.tolist())]
        del_ = [max(-15, x) for x in del_]
        filtedData2[j,:] = filtedData2[j-1,:] + del_  # accumulate onto the median-referenced signal
    
    # High-pass filter (subtract a one-pole low-pass estimate of the baseline)
    a = 0.06  # HPF filter coeffs
    b = 0.94

    preVal = np.zeros(32)
    eeg_filt = np.zeros((filtedData2.shape[0], filtedData2.shape[1]))
    for j in range(filtedData2.shape[0]):
        preVal = a * filtedData2[j,:] + b * preVal
        eeg_filt[j,:] = filtedData2[j,:] - preVal
        
    print(eeg_filt.shape)

    
    # IIR filter: subtract a slow running-average baseline
    IIR_TC = 256                      # time constant in samples
    EEG_data = eeg_filt
    rows, columns = eeg_data.shape[0],eeg_data.shape[1]
    filtedData = np.zeros((rows, columns))
    back_ = EEG_data[0,:]

    for r in range(1,rows):
        back_ = (back_ * ( IIR_TC- 1 ) + EEG_data[r,:])/IIR_TC
        filtedData[r,:] = EEG_data[r,:] - back_

    return eeg_channel,filtedData,info

    
eeg_channel,eeg_data,info = load_data('data/game_attention.csv')
eeg_original_shape: (7875, 100)
eeg_channel: (32,)
eeg_data: (7875, 32)
eeg_info Empty DataFrame
Columns: [title:g3,  start timestamp:1618472249.835243,  stop timestamp:1618472310.858389,  headset type:EPOCFLEX,  headset serial:F0000239,  headset firmware:f12,  channels:111,  sampling rate:128,  samples:7875      ,  version:2.1,  ]
Index: []
['EEG.Cz' 'EEG.Fz' 'EEG.Fp1' 'EEG.F7' 'EEG.F3' 'EEG.FC1' 'EEG.C3'
 'EEG.FC5' 'EEG.FT9' 'EEG.T7' 'EEG.CP5' 'EEG.CP1' 'EEG.P3' 'EEG.P7'
 'EEG.PO9' 'EEG.O1' 'EEG.Pz' 'EEG.Oz' 'EEG.O2' 'EEG.PO10' 'EEG.P8'
 'EEG.P4' 'EEG.CP2' 'EEG.CP6' 'EEG.T8' 'EEG.FT10' 'EEG.FC6' 'EEG.C4'
 'EEG.FC2' 'EEG.F4' 'EEG.F8' 'EEG.Fp2']
(7875,)
(7875, 32)
(7875, 32)
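
The high-pass stage inside load_data (preVal = a*x + b*preVal; y = x - preVal) is a one-pole recursive filter. Purely as an illustrative sketch (not part of the original pipeline), the same operation can be written with scipy.signal.lfilter, which makes the transfer function explicit:

# Illustrative only: the loop-based HPF in load_data is equivalent to an IIR
# filter with transfer function H(z) = ((1 - a) - b*z^-1) / (1 - b*z^-1),
# because y = x - lfilter([a], [1, -b], x).
from scipy.signal import lfilter

def hpf_vectorized(x, a=0.06, b=0.94):
    # x: (n_samples, n_channels) array; the filter runs along the time axis
    return lfilter([1 - a, -b], [1, -b], x, axis=0)

Applied to the median-referenced, slew-limited data, this reproduces eeg_filt up to floating-point error.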

Building the raw object

'''
input
    sfreq = 128  # sampling frequency
return
    raw  # an MNE EEG data object that bundles the data, channel names, and other metadata
'''
def to_raw_object(eeg_channel,eeg_data,sfreq = 128):
    
    # The array is 32 x 7875.
    mne_data = eeg_data.T  # either of the DC-offset removal methods above can be applied as needed   [:4,:128*60]


    # Define channel types and names.
    ch_types = ['eeg']*eeg_channel.shape[0]
    ch_names = eeg_channel.tolist()

    """
    创建info对象
    """
    info = mne.create_info(ch_names=ch_names,
                           sfreq=sfreq, 
                           ch_types=ch_types)
    """
    利用mne.io.RawArray创建raw对象
    """
    raw = mne.io.RawArray(mne_data, info)
    
    # Set each channel's scalp position so topographic plots can be drawn
    # raw.set_montage('standard_1020', raise_if_subset=False)
    
    return raw



raw = to_raw_object(eeg_channel,eeg_data,128)
# Raw EEG data
scalings = 'auto'
raw.plot(n_channels=32, scalings=scalings,
         title='Auto-scaled originalData from arrays',
         show=True, block=True)
plt.show()
# Power spectral density plot
raw.plot_psd()
plt.show()
Creating RawArray with float64 data, n_channels=32, n_times=7875
    Range : 0 ... 7874 =      0.000 ...    61.516 secs
Ready.
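
The set_montage call inside to_raw_object is commented out because the Emotiv channel names carry an 'EEG.' prefix that the built-in montages do not recognize. A possible sketch (hypothetical, not in the original code) is to strip the prefix and then attach the standard 10-20 layout:

# Hypothetical sketch: strip the 'EEG.' prefix so the channel names match the
# standard_1020 montage, then attach electrode positions for topographic plots.
raw_located = raw.copy()
raw_located.rename_channels(lambda name: name.replace('EEG.', ''))
raw_located.set_montage('standard_1020', on_missing='warn')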


Data preprocessing

'''
Take a raw object and return a preprocessed raw object.
input:
    raw
return:
    new_raw
'''

def pre_handel_data(raw):
    # Preprocessing:
    # 1. Drop bad channels
    # raw.drop_channels(['ECG'])
    # 2. Filtering (the upper cutoff must stay below the Nyquist frequency, 64 Hz here)
#     raw.filter(0.5, 45)
    # 3. Set the reference electrodes; ours would also be these two (linked-ears reference)
    # raw.set_eeg_reference(['TP9', 'TP10'])
    raw.set_eeg_reference([])  # Emotiv already applies a reference, so no additional re-referencing is needed

    # 4. When there are external stimulus events, or when the EEG changes, the changed data should be
    #    compared against the unchanged baseline; this requires building an Epochs object
    # raw = mne.Epochs(data, events, event_id=[2], tmin=-0.2, tmax=1.0, baseline=(-0.2, 0.0), preload=True)
    return raw
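
Step 4 needs an Epochs object. Since this recording has no stimulus channel, a hedged sketch (not part of the original code; the 1-second duration is an arbitrary choice) could use MNE's fixed-length events:

# Hypothetical sketch for step 4: without a stimulus channel, fixed-length
# events can cut the continuous recording into 1-second epochs.
events = mne.make_fixed_length_events(raw, id=1, duration=1.0)
epochs = mne.Epochs(raw, events, event_id=1, tmin=0.0, tmax=1.0,
                    baseline=None, preload=True)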



info_fp2 = mne.create_info(ch_names=["FP2"],
                           sfreq=128, 
                           ch_types=["eeg"])
# Select the Fp2 channel
raw_Fp2 = mne.io.RawArray([eeg_data[:,31]], info_fp2)

raw_Fp2 = pre_handel_data(raw_Fp2)

scalings = 'auto'
raw_Fp2.plot(n_channels=1, scalings=scalings,
             title='Auto-scaled FP2 from arrays',
             show=True, block=True)
plt.show()

raw_Fp2.plot_psd()
plt.show()

show_fq_image(eeg_data[:,31],'FP2')
Creating RawArray with float64 data, n_channels=1, n_times=7875
    Range : 0 ... 7874 =      0.000 ...    61.516 secs
Ready.
EEG channel type selected for re-referencing
EEG data marked as already having the desired reference.
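
Before the spectral analysis, the single-channel Raw can also be band-pass filtered with MNE's built-in filtering. A small sketch (not applied in the original flow; the 0.5-45 Hz cutoffs are an assumed choice, and the upper edge must stay below the 64 Hz Nyquist limit):

# Hypothetical sketch: band-pass FP2 to 0.5-45 Hz on a copy before further analysis.
raw_Fp2_filt = raw_Fp2.copy().filter(l_freq=0.5, h_freq=45.0)
raw_Fp2_filt.plot_psd()
plt.show()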


Extracting frequency-band features

https://blog.csdn.net/cqfdcw/article/details/84995904

import pywt

def wavelet_eeg(data):
    wp = pywt.WaveletPacket(data=data,wavelet='dmey')
    _len = len([node.path for node in wp.get_level(6, 'freq')])
    node_6_name = [node.path for node in wp.get_level(6, 'freq')]
    sfreq_table = {"δ":[1,3],"θ":[4,7],"α":[8,13],"β":[14,30]}
    sfreq_index = ["δ","θ","α","β"]
    # sfreq_data =  {"δ":[],"θ":[],"α":[],"β":[],"γ":[]}
    sfreq_object =  {"δ":object,"θ":object,"α":object,"β":object}
    wavelet_eeg_data = []


    # for i in range(_len):
    #     new_wp = pywt.WaveletPacket(data=None, wavelet='dmey')
    #     new_wp[node_6_name[i]] = wp[node_6_name[i]].data
    #     new_wp.reconstruct(update=True)
    #     show_fq_image(new_wp.data,"F:"+str(i))

    
    for x in range(len(sfreq_index)):
        # frequency range (Hz) of this band
        f_start = sfreq_table[sfreq_index[x]][0]
        f_end = sfreq_table[sfreq_index[x]][1]

        sfreq_object[sfreq_index[x]] = pywt.WaveletPacket(data=None, wavelet='dmey',mode='smooth')

        for i in range(int(f_end-f_start)):
            sfreq_object[sfreq_index[x]][node_6_name[int(f_start)+i]] = wp[node_6_name[int(f_start)+i]]

        sfreq_object[sfreq_index[x]].reconstruct(update=True)

        Y = np.array(sfreq_object[sfreq_index[x]].data)
        wavelet_eeg_data.append(Y)
        show_fq_image(Y,sfreq_index[x]+"         frequency:"+str(f_start)+"-"+str(f_end))
    
    
    
        
    return wavelet_eeg_data


wavelet_eeg_data = wavelet_eeg(raw_Fp2.get_data()[0])
(3967,) (3966,)
(3967,) (3966,)
(3967,) (3966,)
(3967,) (3966,)
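
At decomposition level 6 with fs = 128 Hz, each frequency-ordered terminal node spans 128/2/2^6 = 1 Hz, which is why the band edges in Hz are used directly as indices into node_6_name above. A small illustrative check of that mapping:

# Illustrative only: node-to-frequency mapping at level 6 and fs = 128 Hz,
# where each frequency-ordered node covers fs / 2 / 2**6 = 1 Hz.
fs = 128
level = 6
band_width = fs / 2 / 2 ** level   # 1.0 Hz per terminal node
for band, (f_lo, f_hi) in {"δ": (1, 3), "θ": (4, 7), "α": (8, 13), "β": (14, 30)}.items():
    idx_lo = int(f_lo / band_width)
    idx_hi = int(f_hi / band_width)
    print(band, "-> nodes", idx_lo, "to", idx_hi - 1)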

Computing the entropy (relative band energy)

eeg_ApEn=np.linalg.norm(wavelet_eeg_data, axis=1, keepdims=True)

sum_ApEn = np.sum(eeg_ApEn)

tags = ["δ","θ","α","β"]

print(eeg_ApEn,sum_ApEn,eeg_ApEn/sum_ApEn)


fig,ax=plt.subplots()
ax.bar([i+1 for i in range(4)],[x[0] for x in eeg_ApEn/sum_ApEn], width=0.5)
ax.set_xlabel("feature")  # x-axis label
ax.set_ylabel("importance")  # y-axis label
ax.set_title("feature importance")  # title
# use the band names as x-tick labels
plt.xticks([i+1 for i in range(4)],tags)
plt.show()  # show the figure
[[1851.00903921]
 [ 914.30063799]
 [ 514.65745166]
 [ 927.00523866]] 4206.972367520575 [[0.43998602]
 [0.21732984]
 [0.1223344 ]
 [0.22034973]]
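
What is computed above is the L2 norm of each reconstructed band signal, normalized by the total, i.e. a relative band-energy feature rather than an entropy in the strict sense. If an actual entropy measure is wanted, one common option is the Shannon entropy of the normalized band energies (wavelet energy entropy); a hedged sketch, assuming the squared norms are used as band energies:

# Hypothetical sketch: Shannon entropy of the normalized band energies
# (wavelet energy entropy), using squared L2 norms as the band energies.
band_energy = np.array([np.sum(np.square(y)) for y in wavelet_eeg_data])
p = band_energy / band_energy.sum()           # relative energy per band
wavelet_entropy = -np.sum(p * np.log(p))      # Shannon entropy in nats
print(dict(zip(tags, p)), wavelet_entropy)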
