Project: In-Depth Analysis of Clinical Signs from 1000+ COVID-19 Patients

Experiment requirements

The dataset cannot be open-sourced.

  • Feature selection: identify the key parameters for judging disease severity.
     Use Lasso, LassoCV, ElasticNet, and RandomForest.

  • Predict severe or critical illness from a set of early-stage parameters.
     Use SVM, CNN, KNN, and LSTM classifiers.

  • Predict the clinical outcome from a set of early-stage parameters.
     Use SVM, CNN, KNN, and LSTM classifiers.

  • Analyse which clinical parameters are related to antibody levels, taking temporal ordering into account.
     Same as above, except the dependent variable becomes the antibody level.

  • Analyse the dependencies between different parameters (CCA).
     Dependent variables: disease severity, discharge/death, antibody level.


Experiment source code

 

I. Feature selection
Lasso / LassoCV / ElasticNet

import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['Microsoft Yahei']
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.linear_model import Lasso, Ridge, RidgeCV, ElasticNet, LassoCV

# Drop rows where more than 20% of the values are missing
def del_rows(data):
    t = int(0.8*data.shape[1])
    data = data.dropna(thresh=t)  # keep rows with at least t non-null values
    return data

df = pd.read_excel(r'C:\Users\Somewhere\PycharmProjects\Machine learning COVID‐19 蔡加欣\临床指标简化整理    实验报告.xlsx')
df = df.drop(['病人ID'],axis=1)
df = df.drop(['发病日期'],axis=1)
df = df.drop(['入院时间'],axis=1)
df = df.drop(['出院/死亡时间'],axis=1)
df = df.drop(['检测日期'],axis=1)
df = df.drop(['发病天数'],axis=1)
df['性别']=df['性别'].astype(str).map({'女':0,'男':1})
df['临床结局 ']=df['临床结局 '].astype(str).map({'出院':0,'死亡':1})
df['严重程度(最终)']=df['严重程度(最终)'].astype(str).map({'无症状感染者':0,'轻型':0,'重型':1,'危重型':1})
df['是否进入ICU']=df['是否进入ICU'].astype(str).map({'否':0,'是':1})
X = df.drop(['严重程度(最终)'],axis=1).fillna(0)
y = df['严重程度(最终)'].fillna(0)
seed=5
x_train,x_test,y_train,y_test = train_test_split(X, y, test_size=0.3, random_state=seed)

def rmse_cv(model):
    rmse= np.sqrt(-cross_val_score(model, x_train,y_train, scoring="neg_mean_squared_error", cv = 3))
    return(rmse)

# Fit LassoCV, which tunes the regularization strength by cross-validation
model_lasso = LassoCV().fit(x_train,y_train)
# Report how many features the model kept and how many it eliminated
feature = pd.Series(model_lasso.coef_, index = x_train.columns)
print("Lasso picked " + str(sum(feature != 0)) + " variables and eliminated the other " +  str(sum(feature == 0)) + " variables")
# Plot feature importance: keep the 10 largest coefficients
feature_important = feature.sort_values().tail(10)
print(feature_important)

# Plotting
matplotlib.rcParams['figure.figsize'] = (11.0, 5.0)
feature_important.plot(kind="barh")
plt.title("Selections in the Lasso Model")
plt.show()
  •  Calling the corresponding estimator in the same way performs Lasso, LassoCV, or ElasticNet feature selection; a short sketch of the swap follows below.
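The block above only fits LassoCV. As a minimal sketch (reusing x_train and y_train from the preprocessing above; the alpha value and l1_ratio grid are illustrative assumptions, not tuned settings), the other estimators can be dropped in the same way:

# Sketch: swap in a plain Lasso with a hand-picked alpha, or ElasticNetCV, which
# cross-validates both the regularization strength and the l1/l2 mix.
from sklearn.linear_model import Lasso, ElasticNetCV

model_lasso_fixed = Lasso(alpha=0.01).fit(x_train, y_train)   # alpha chosen for illustration
model_enet = ElasticNetCV(l1_ratio=[0.1, 0.5, 0.9, 1.0], cv=3).fit(x_train, y_train)

for name, est in [("Lasso(alpha=0.01)", model_lasso_fixed), ("ElasticNetCV", model_enet)]:
    coefs = pd.Series(est.coef_, index=x_train.columns)
    print(name, "kept", int((coefs != 0).sum()), "variables")
    print(coefs.sort_values().tail(10))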

Random Forest

import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['Microsoft Yahei']
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# Drop rows where more than 20% of the values are missing
def del_rows(data):
    t = int(0.8*data.shape[1])
    data = data.dropna(thresh=t)
    return data
# Data preprocessing
df = pd.read_excel(r'C:\Users\Somewhere\PycharmProjects\Machine learning COVID‐19 蔡加欣\临床指标简化整理    实验报告.xlsx')
df = df.drop(['病人ID'],axis=1)
df = df.drop(['发病日期'],axis=1)
df = df.drop(['入院时间'],axis=1)
df = df.drop(['出院/死亡时间'],axis=1)
df = df.drop(['检测日期'],axis=1)
df = df.drop(['发病天数'],axis=1)
df['性别']=df['性别'].astype(str).map({'女':0,'男':1})
df['临床结局 ']=df['临床结局 '].astype(str).map({'出院':0,'死亡':1})
df['严重程度(最终)']=df['严重程度(最终)'].astype(str).map({'无症状感染者':0,'轻型':0,'重型':1,'危重型':1})
df['是否进入ICU']=df['是否进入ICU'].astype(str).map({'否':0,'是':1})
#df = df.dropna(axis=0,how='any',thresh=None,subset=['S1_IgG','N_IgG'],inplace=False)
X = df.drop(['临床结局 '],axis=1).fillna(0)
y = df['临床结局 '].fillna(0)
# Train/test split
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
# Fit the random forest
model = RandomForestClassifier(n_estimators=10000, random_state=0, n_jobs=-1)
model.fit(x_train, y_train.astype('int'))
importance = pd.Series(model.feature_importances_, index=X.columns)
importance = importance.sort_values().tail(10)
print(importance)
# Plot
matplotlib.rcParams['figure.figsize'] = (12.0, 5.0)
importance.plot(kind="barh")
plt.title("Selections in the RandomForest Model")
plt.show()
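The importance ranking above is printed and plotted by hand. As a hedged follow-up (reusing the fitted model and X from above; threshold='mean' is an illustrative choice), sklearn can turn the ranking into an actual feature subset:

# Sketch: select the features whose importance exceeds the mean importance.
from sklearn.feature_selection import SelectFromModel

selector = SelectFromModel(model, threshold='mean', prefit=True)
selected_columns = X.columns[selector.get_support()]
print("RandomForest kept", len(selected_columns), "features:")
print(list(selected_columns))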

II. Classifiers and prediction

KNN + cross-validation to select the optimal k

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

# Drop rows where more than 20% of the values are missing
def del_rows(data):
    t = int(0.8*data.shape[1])
    data = data.dropna(thresh=t)
    return data

# Data preprocessing
df = pd.read_excel(r'C:\Users\Somewhere\PycharmProjects\Machine learning COVID‐19 蔡加欣\临床指标简化整理    实验报告.xlsx')
df['性别']=df['性别'].astype(str).map({'女':0,'男':1})
df['临床结局 ']=df['临床结局 '].astype(str).map({'出院':0,'死亡':1})
df['严重程度(最终)']=df['严重程度(最终)'].astype(str).map({'无症状感染者':0,'轻型':0,'重型':1,'危重型':1})
df['是否进入ICU']=df['是否进入ICU'].astype(str).map({'否':0,'是':1})
df = df.dropna(axis=0,how='any',thresh=None,subset=['S1_IgG','N_IgG'],inplace=False)
data = del_rows(df)
X = data[['血_中性粒细胞(%)','血_超敏C反应蛋白','血_肌红蛋白','血_乳酸脱氢酶','血_LDH*0.9','年龄','血_白细胞介素6','血_肌酐','血_白细胞介素8','血_铁蛋白']].fillna(0)
y = data['临床结局 ']
seed = 5
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state = seed)

# Standardize the features
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)


# Plot the validation curve to choose the best k
from sklearn.model_selection import validation_curve
param_name = 'n_neighbors'
param_range = range(1, 51)

# scoring: use 'accuracy' for classification, 'neg_mean_squared_error' for regression
train_scores, test_scores = validation_curve(
    KNeighborsClassifier(), x_train, y_train, cv=5,
    param_name=param_name, param_range=param_range,
    scoring='accuracy')

train_scores_mean = np.mean(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)

plt.plot(param_range,train_scores_mean, color='red', label='train')
plt.plot(param_range, test_scores_mean, color='green', label='test')
plt.legend(loc='best')
plt.xlabel('param range of k')
plt.ylabel('scores mean')
plt.show()

# Predict on the test set
classifier = KNeighborsClassifier(n_neighbors=16)
classifier.fit(x_train, y_train)
score = classifier.score(x_test, y_test)
print(score)
from sklearn.metrics import classification_report, accuracy_score
y_predict = np.round(classifier.predict(x_test)).astype(int)
print('Results for Binary Model')
print(accuracy_score(y_test,y_predict))
print(classification_report(y_test,y_predict))
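Reading k off the curve by eye is workable; as a hedged alternative (reusing the scaled x_train and y_train above, with an illustrative 5-fold CV over the same 1..50 range), the choice can also be confirmed numerically:

# Sketch: grid-search n_neighbors instead of picking it from the plot.
from sklearn.model_selection import GridSearchCV

grid = GridSearchCV(KNeighborsClassifier(),
                    param_grid={'n_neighbors': list(range(1, 51))},
                    cv=5, scoring='accuracy')
grid.fit(x_train, y_train)
print('best k:', grid.best_params_['n_neighbors'], 'CV accuracy:', grid.best_score_)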

SVM + 5-fold cross-validation

# Make sure Chinese characters print correctly
import io
import sys
import pandas as pd
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')

# Drop rows where more than 20% of the values are missing
def del_rows(data):
    t = int(0.8 * data.shape[1])
    data = data.dropna(thresh=t)
    return data
# Data preprocessing
df = pd.read_excel(r'C:\Users\Somewhere\PycharmProjects\Machine learning COVID‐19 蔡加欣\临床指标简化整理    实验报告.xlsx')
df['性别'] = df['性别'].astype(str).map({'女': 0, '男': 1})
df['临床结局 '] = df['临床结局 '].astype(str).map({'出院': 0, '死亡': 1})
df['严重程度(最终)'] = df['严重程度(最终)'].astype(str).map({'无症状感染者': 0, '轻型': 0, '重型': 1, '危重型': 1})
df['是否进入ICU'] = df['是否进入ICU'].astype(str).map({'否': 0, '是': 1})

data = del_rows(df)
X = data[['年龄','S1_IgG','血_D-D二聚体定量','血_乳酸脱氢酶','血_LDH*0.9','血_中性粒细胞(%)','血_RBC分布宽度SD',
      'N_IgG','血_淋巴细胞(%)','血_中性粒细胞(#)']].fillna(0)
y = data['严重程度(最终)'].fillna(0)

from sklearn import svm
from sklearn.model_selection import cross_val_score

clf = svm.SVC(probability=True)
scores = cross_val_score(clf, X, y, cv=5, scoring='accuracy')
print(scores)
print('Cross_val_score.Mean:')
print(scores.mean())
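Unlike the KNN block, the SVM here is cross-validated on unscaled features. A hedged variant (reusing the same X and y) standardizes inside each fold so the scaler never sees the held-out fold:

# Sketch: wrap scaling and the SVM in a pipeline before cross-validating.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

pipe = make_pipeline(StandardScaler(), svm.SVC(probability=True))
scaled_scores = cross_val_score(pipe, X, y, cv=5, scoring='accuracy')
print('Scaled SVM CV accuracy:', scaled_scores.mean())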

CNN + validation + loss curve

import scipy
import numpy as np
import pandas as pd
from pylab import *
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.layers import Dense, Dropout, Flatten
 
# Drop rows where more than 20% of the values are missing
def del_rows(data):
    t = int(0.8*data.shape[1])
    data = data.dropna(thresh=t)  # keep rows with at least t non-null values
    #data = data[(data.T != 0).any()]
    return data

# Data preprocessing
df = pd.read_excel(r'C:\Users\Somewhere\PycharmProjects\Machine learning COVID‐19 蔡加欣\临床指标简化整理    实验报告.xlsx')
df['性别']=df['性别'].astype(str).map({'女':0,'男':1})
df['临床结局 ']=df['临床结局 '].astype(str).map({'出院':0,'死亡':1})
df['严重程度(最终)']=df['严重程度(最终)'].astype(str).map({'无症状感染者':0,'轻型':0,'重型':1,'危重型':1})
df['是否进入ICU']=df['是否进入ICU'].astype(str).map({'否':0,'是':1})
#df = df.dropna(axis=0,how='any',thresh=None,subset=['S1_IgG','N_IgG'],inplace=False)
data = del_rows(df)
X = data[['年龄','血_LDH*0.9','血_超敏C反应蛋白','血_乳酸脱氢酶','血_肌红蛋白','血_平均血红蛋白浓度',
      '血_白细胞介素2受体','血_氨基末端脑钠肽前体(NT-proBNP)']].fillna(0)
y = data['严重程度(最终)'].fillna(0)
seed = 5
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state = seed)

# Standardize the features
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)

# Build the neural network
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras import regularizers
import keras
def create_model():
    model = Sequential()
    model.add(Dense(8, activation='relu', input_dim=8))
    model.add(Dropout(0.5))  # Dropout must be added to the model to take effect
    model.add(Dense(6, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    adam = Adam(learning_rate=0.001)
    model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
    return model
model = create_model()
history = model.fit(x_train,y_train, epochs=120,batch_size=64, validation_data=(x_test, y_test), verbose=2, shuffle=False)

# Evaluation: plot training vs. validation loss
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
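For parity with the KNN and LSTM blocks, a short hedged evaluation on the held-out split (using the fitted model and the scaled x_test/y_test from above) can be appended:

# Sketch: threshold the sigmoid output at 0.5 and report the usual metrics.
from sklearn.metrics import classification_report, accuracy_score

y_prob = model.predict(x_test)
y_pred = (y_prob > 0.5).astype(int).ravel()
print('Results for Binary Model')
print(accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))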

LSTM + validation

import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import LSTM, Dense

# DataFrame -> nested list of rows
def excel_array(df):
    return df.values.tolist()

# Group the feature rows by patient ID (column 0): each patient becomes one sequence
def division_X(data):
    list_new = []    # list of per-patient sequences (a 3-level nested list)
    list_short = []
    for i in range(0, len(data) - 1):
        # rows whose patient ID matches the next row belong to the same sequence
        if data[i][0] == data[i + 1][0]:
            list_short.append(data[i])
        else:
            list_new.append(list_short)
            list_short = []
    # note: the last patient's sequence is never appended, so the labels are trimmed with y[:-1] below
    return list_new
# Keep one label row per patient (the first record of each patient ID)
def division_Y(data):
    list_short = []
    list_short.append(data[0])
    for i in range(0, len(data) - 1):
        # a change in patient ID marks the start of the next patient
        if data[i][0] != data[i + 1][0]:
            list_short.append(data[i+1])
    return list_short

# Drop rows where more than 30% of the values are missing
def del_rows(data):
    t = int(0.7*data.shape[1])
    data = data.dropna(thresh=t)  # keep rows with at least t non-null values
    return data

# Data preprocessing
df =  pd.read_excel(r'C:\Users\Somewhere\PycharmProjects\Machine learning COVID‐19 蔡加欣\临床指标简化整理    实验报告.xlsx')
df['性别']=df['性别'].astype(str).map({'女':0,'男':1})
df['临床结局 ']=df['临床结局 '].astype(str).map({'出院':0,'死亡':1})
df['严重程度(最终)']=df['严重程度(最终)'].astype(str).map({'无症状感染者':0,'轻型':0,'重型':1,'危重型':1})
df['是否进入ICU']=df['是否进入ICU'].astype(str).map({'否':0,'是':1})
#df = df.dropna(axis=0,how='any',thresh=None,subset=['S1_IgG','N_IgG'],inplace=False)
data = df[['病人ID','临床结局 ','血_超敏C反应蛋白','血_肌红蛋白','血_乳酸脱氢酶','血_LDH*0.9','血_白细胞介素6','血_平均血红蛋白浓度',
      '血_铁蛋白','血_白细胞介素2受体','血_氨基末端脑钠肽前体(NT-proBNP)']]
data = del_rows(data)
X =  data[['病人ID','血_超敏C反应蛋白','血_肌红蛋白']]
X = X.fillna(0)
X = excel_array(X)
X = division_X(X)
for i in X:
    for j in i:
        j.pop(0)

Y = data[['病人ID','临床结局 ']]
Y = Y.fillna(0)
Y = excel_array(Y)
Y = division_Y(Y)
y = []
for i in Y:
        y.append(i[-1])  # keep only the label ('临床结局 ') for each patient
y = y[:-1]  # division_X drops the last patient, so drop its label as well to keep lengths equal
print(y)
print(len(y))


# Build the LSTM model
from sklearn.model_selection import train_test_split
from keras.layers import Input, Masking
import keras as ks
seed=5
x_train,x_test,y_train,y_test = train_test_split(X, y, test_size=0.2, random_state=seed)
# dtype='float32' keeps the clinical values from being truncated (the pad_sequences default dtype is int32)
x_train=ks.preprocessing.sequence.pad_sequences(x_train,maxlen=15,padding='post',value=0.,dtype='float32')
x_test=ks.preprocessing.sequence.pad_sequences(x_test,maxlen=15,padding='post',value=0.,dtype='float32')
from keras import regularizers
from keras.layers import Dropout
def create_model():
    model = Sequential()
    model.add(Masking(mask_value=0., input_shape=(np.array(x_train).shape[1],np.array(x_train).shape[2])))
    model.add(LSTM(32,kernel_regularizer=regularizers.l1_l2(0.2)))
    model.add(Dense(16,kernel_initializer='uniform', activation='relu'))
    model.add(Dropout(0.1))  # Dropout must be added to the model to take effect
    model.add(Dense(1,kernel_initializer='uniform',activation='sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model
model = create_model()
history = model.fit(x_train,y_train, epochs=120,batch_size=80, validation_data=(x_test, y_test), verbose=2, shuffle=False)

# Evaluation
from sklearn.metrics import classification_report, accuracy_score
y_predict = np.round(model.predict(x_test)).astype(int)
print('Results for Binary Model')
print(accuracy_score(y_test,y_predict))
print(classification_report(y_test,y_predict))

Code notes

 - excel_array    division_X    division_Y
   reshape the features and labels into the following form:
   [ [[x1,x2,x3],      <- time-ordered feature rows for one patient
      [y1,y2,y3],
      [z1,z2,z3]]      with the corresponding label [1] ]
 
 - x_train=ks.preprocessing.sequence.pad_sequences(x_train,maxlen=15,padding='post',value=0.)
   Because the input time series have different lengths,
   pad_sequences pads them with zeros to a common length,
   and the Masking layer in the model ignores the padded zeros (see the short demo below).
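A toy demonstration of that padding step (the numbers are made up; dtype='float32' is passed so float-valued features are not truncated to integers):

# Two patients with 2 and 3 visits of 3 features each, padded to length 15.
import keras as ks

toy = [[[1.2, 0.5, 7.0], [1.1, 0.4, 6.5]],
       [[2.0, 0.9, 8.1], [1.8, 0.8, 7.9], [1.7, 0.7, 7.5]]]
padded = ks.preprocessing.sequence.pad_sequences(toy, maxlen=15, padding='post', value=0., dtype='float32')
print(padded.shape)   # (2, 15, 3): shorter sequences are filled with zero rows at the end,
                      # which the Masking(mask_value=0.) layer then skips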

In practice, tasks 1-3 are almost identical; only the dependent variable changes.

  • Because the antibody level is continuous, equal-width binning is used to discretize it into 0/1,
    i.e. the continuous values are discretized and the prediction becomes a binary classification problem (see the worked snippet below).
Equal-width binning
d1 = pd.cut(data, k, labels=range(k))  # data: the continuous antibody column, k: number of bins
y = d1
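A minimal, self-contained version of that snippet (assuming the preprocessed df from the blocks above; k=2 matches the CCA section below):

# Equal-width binning of the S1_IgG antibody level into a binary label.
import pandas as pd

k = 2
antibody = df['S1_IgG'].dropna()
y = pd.cut(antibody, k, labels=range(k)).astype(int)
print(y.value_counts())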

III. Dependencies between parameters (CCA)

Feature screening

import numpy as np
import pandas as pd
from sklearn.cross_decomposition import CCA

# Drop rows where more than 20% of the values are missing
def del_rows(data):
    t = int(0.8*data.shape[1])
    data = data.dropna(thresh=t) 
    return data

# Data preprocessing
df =  pd.read_excel(r'C:\Users\Somewhere\PycharmProjects\Machine learning COVID‐19 蔡加欣\临床指标简化整理    实验报告.xlsx')
df = df.drop(['病人ID'],axis=1)
df = df.drop(['性别'],axis=1)
df = df.drop(['年龄'],axis=1)
df = df.drop(['发病日期'],axis=1)
df = df.drop(['入院时间'],axis=1)
df = df.drop(['出院/死亡时间'],axis=1)
df = df.drop(['检测日期'],axis=1)
df = df.drop(['发病天数'],axis=1)
df = df.dropna(axis=0,how='any',thresh=None,subset=['S1_IgG','N_IgG'],inplace=False)
df['临床结局 ']=df['临床结局 '].astype(str).map({'出院':0,'死亡':1})
df['严重程度(最终)']=df['严重程度(最终)'].astype(str).map({'无症状感染者':0,'轻型':1,'重型':2,'危重型':3})
df['是否进入ICU']=df['是否进入ICU'].astype(str).map({'否':0,'是':1})
k=2
y=df['S1_IgG']
d1 = pd.cut(y, k, labels=range(k))  # equal-width binning of the antibody level into 2 classes
y = d1.astype(int)                  # use a numeric target for CCA rather than a Categorical
df = df.drop(['S1_IgG'],axis=1)
X = df.fillna(0)

# For each variable, fit a one-component CCA against the binned S1_IgG and record the canonical correlation
L = []
for index in X.columns:
    New = []
    cca = CCA(n_components=1)
    cca.fit(X[[index]],y)
    X_train_r, Y_train_r = cca.transform(X[[index]], y)
    New.append(np.corrcoef(X_train_r[:, 0], Y_train_r[:, 0])[0, 1])
    New.append(index)
    L.append(New)
# Sort by canonical correlation (descending) and print the top seven variables
L.sort(key=lambda item: item[0], reverse=True)
for x in L[0:7]:
    print(x)
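The loop above runs a separate one-to-one CCA per column, which essentially ranks single variables. As a hedged sketch of a genuinely multivariate analysis (reusing df, X, and the binned y from above; the clinical columns chosen on the X side are illustrative), a small block of clinical parameters can be related to all three dependent variables at once:

# Sketch: multivariate CCA between a block of clinical parameters and a block of
# dependent variables (severity, outcome, binned antibody level).
clinical = X[['血_超敏C反应蛋白', '血_乳酸脱氢酶', '血_铁蛋白', '血_肌红蛋白']]   # illustrative choice
targets = pd.DataFrame({'severity': df['严重程度(最终)'].fillna(0),
                        'outcome': df['临床结局 '].fillna(0),
                        'S1_IgG_bin': y.astype(int)})
cca2 = CCA(n_components=2)
X_c, Y_c = cca2.fit_transform(clinical, targets)
for i in range(2):
    print('canonical correlation', i + 1, ':', np.corrcoef(X_c[:, i], Y_c[:, i])[0, 1])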

Scatter plot

import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['Microsoft Yahei']  # display Chinese axis labels correctly

color = sns.color_palette()
sns.set_style('darkgrid')
fig, ax = plt.subplots()
# Define colors
colors1 = '#000000'
colors2 = '#FF0000'
# Set axis ranges to suit the data
plt.xlim(0,3000)
plt.ylim(0,60000)

ax.scatter(x=df['血_铁蛋白'],y=df['S1_IgG'],s=3,c=colors2)
# Axis labels (matching the plotted columns)
plt.xlabel('血_铁蛋白',fontsize=13)
plt.ylabel('S1_IgG',fontsize=13)
plt.show()