[Deep Application] First China ECG Intelligence Competition, rematch open-source solution (31st place, score 0.841484)

Preliminary-round open-source code and walkthrough post: https://blog.csdn.net/xiaosongshine/article/details/88972196

Rematch introduction post: https://blog.csdn.net/xiaosongshine/article/details/95326320

The code has two parts: 1. the training code and 2. the prediction code.

1. Training code, which uses K-fold cross-validation, a weighted loss, and residual connections.
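
The weighted loss scales the per-label binary cross-entropy so that positive labels count k times as much as negative ones, which matters when only a few of the 9 labels are positive for a given record. Here is a minimal NumPy sketch of the effective weights used in my_binary_crossentropy below, assuming k = 8 and one positive label out of 9:

import numpy as np

k = 8
y_true = np.array([1., 0., 0., 0., 0., 0., 0., 0., 0.])  # one positive label out of 9

# per-label weight applied to the binary cross-entropy, as in my_binary_crossentropy
weight = (1 + y_true * (k - 1)) * (k + 1) / (2 * k)

print(weight[0])      # 4.5    -> positive labels weighted (k+1)/2
print(weight[1])      # 0.5625 -> negative labels weighted (k+1)/(2k), i.e. k times smaller
print(weight.mean())  # 1.0    -> overall scale stays ~1 when about 1 label in 9 is positive

With that in mind, here is the full training script: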

from glob import glob
import pandas as pd
import numpy as np
from scipy.io import loadmat
import math
from matplotlib import pyplot as plt
from keras.layers import *
from keras.models import *
from keras.optimizers import *
import keras
from keras import backend as K
#import tqdm
import random
import tensorflow as tf

seed = 1234
random.seed(seed)
np.random.seed(seed)   # np.random is used for the crop augmentation and batch shuffling
tf.set_random_seed(seed)



base_path = "/media/jdcloud/"
train_base_dir = base_path+"Train/"
val_base_dir = base_path+"Val/"

ref_path = base_path+"reference.csv"
my_ref_path = "my_ref.csv"
train_ref_path = "train.csv"

BZ = 100          # batch size
KF = 5            # number of folds
tsplit = 0.8      # train fraction (not used directly; the split is driven by train_len/test_len)
Lens = 6500       # total number of training records
                  # (note: distinct from the Lens window-length argument of get_feature below)
train_len = 5200  # records used for training in each fold
test_len = 1300   # records held out for validation in each fold

data_len = 16000  # (not used below)

def create_csv(ref_path,save_path):
    data_df = pd.read_csv(ref_path)
    labels = data_df.iloc[:,1:]
    indexs = []
    for ls in labels.values:
        # convert the variable-length label list into a 9-dim multi-hot vector;
        # empty cells read back as NaN, so int(l) fails there and the loop stops
        index = np.zeros((9,)).astype(np.int32)
        for l in ls:
            try:
                index[int(l)] = 1
            except:
                break
        indexs.append(list(index))
    out_df = pd.DataFrame()
    out_df["fname"] = data_df["File_name"]
    out_df["label"] = indexs
    print(out_df.head())
    out_df.to_csv(save_path,index=None)
    print("saved csv",save_path)

def read_csv(save_path,train_csv_path):
    # shuffle the reference rows once and persist the order used for the K-fold split
    my_df = pd.read_csv(save_path)
    my_df = my_df.sample(frac=1).reset_index(drop=True)
    my_df.to_csv(train_csv_path,index=None)


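# get_feature: load one 12-lead record from a .mat file and return a fixed-length window.
#   - the 12 leads are stacked and the signal is tiled until it is longer than Lens samples
#   - pred=False, train=True : the Lens-sample window starts either at the beginning or at
#     the end of the record (a simple random-offset augmentation); returned as (Lens, 12)
#   - pred=False, train=False: the window always starts at sample 0; returned as (Lens, 12)
#   - pred=True              : both the first and the last Lens samples are returned,
#     stacked as (2, Lens, 12), so the two window predictions can be averaged at test time
#   - sex and age are read from the .mat file but not used as features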
def get_feature(wav_file,BASE_DIR=train_base_dir,Lens=12000,pred=False,train=False):

    file_path = BASE_DIR+wav_file+".mat"

    mat = loadmat(file_path)

    dat = np.array([mat[s][0] for s in ["I","II","III","aVR","aVL","aVF","V1","V2","V3","V4","V5","V6"]])

    
    while(len(dat[0])<=Lens+10):
        dat=np.concatenate([dat,dat],1)

    if(not pred):

        if(train):

            sub_len = len(dat[0])-Lens
            rand = np.random.randint(0,high =2)
            if(rand==0):
                start_index = 0
            else:
                start_index = sub_len

        else:
            start_index = 0
    
        dat = dat[:,start_index:start_index+Lens]

        dat = dat.transpose([1,0])
        feature = dat


    else:
        sub_len = len(dat[0])-Lens
        dat1 = dat[:,0:Lens]
        dat2 = dat[:,sub_len:]
        dat1 = dat1.transpose([1,0])
        dat2 = dat2.transpose([1,0])

        feature = np.array([dat1,dat2])

    sex = mat["sex"][0]
    age = mat["age"][0][0]

    
    return(feature)

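# xs_gen: batch generator over the shuffled reference csv. The fold index KF rotates the
# row order (rows test_len*KF onwards come first, the earlier rows are appended at the end),
# so for fold kf the original rows [test_len*kf, test_len*(kf+1)) sit at the front and are
# used as the held-out split (train=False), while the remaining train_len rows feed the
# training batches (train=True).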
def xs_gen(path=train_ref_path,batch_size = BZ,train=True,lens = test_len,KF=0):

    img_list = pd.read_csv(path)

    img_list = img_list.values

    if KF != 0:
        # rotate the row order so a different block of test_len records sits at the front
        img_list = np.concatenate([img_list[test_len*KF:],img_list[:test_len*KF]])

    if train :

        img_list = np.array(img_list)[test_len:]
        print("Found %s train items."%len(img_list))
        print("list 1 is",img_list[0])
        steps = math.ceil(len(img_list) / batch_size)    # number of batches per epoch
    else:
        img_list = np.array(img_list)[:test_len]
        print("Found %s test items."%len(img_list))
        print("list 1 is",img_list[0])
        steps = math.ceil(len(img_list) / batch_size)    # number of batches per epoch
    while True:
        if(train):
            np.random.shuffle(img_list)
        for i in range(steps):

            batch_list = img_list[i * batch_size : i * batch_size + batch_size]
            np.random.shuffle(batch_list)
            # note: train=True (random start offset) is applied to validation batches too,
            # and the stringified multi-hot labels are parsed back with eval
            batch_x = np.array([get_feature(file,train=True) for file in batch_list[:,0]])
            batch_y = np.array([eval(label) for label in batch_list[:,1]])

            yield batch_x, batch_y

  

def build_model(input_shape=(12000,12),num_classes=9):
    # plain CNN baseline; the training loop below uses build_res_model instead
    model = Sequential()

    model.add(Conv1D(16, 16,strides=2, activation='relu',input_shape=input_shape))
    model.add(Conv1D(16, 16,strides=2, activation='relu',padding="same"))
    model.add(MaxPooling1D(2))
    #model.add(Dropout(0.5))

    model.add(Conv1D(64, 16,strides=2, activation='relu',padding="same"))
    model.add(Conv1D(64, 16,strides=2, activation='relu',padding="same"))
    model.add(MaxPooling1D(2))
    #model.add(Dropout(0.5))

    model.add(Conv1D(128, 8,strides=2, activation='relu',padding="same"))
    model.add(Conv1D(128, 8,strides=2, activation='relu',padding="same"))
    model.add(MaxPooling1D(2))
    model.add(Dropout(0.5))

    model.add(Conv1D(256, 8,strides=1, activation='relu',padding="same"))
    model.add(Conv1D(256, 8,strides=1, activation='relu',padding="same"))
    model.add(MaxPooling1D(2))
    model.add(Dropout(0.5))

    model.add(Conv1D(512, 4,strides=1, activation='relu',padding="same"))
    model.add(Conv1D(512, 4,strides=1, activation='relu',padding="same"))
    model.add(MaxPooling1D(2))
    model.add(Dropout(0.5))

    model.add(Conv1D(1024, 4,strides=1, activation='relu',padding="same"))
    model.add(Conv1D(1024, 4,strides=1, activation='relu',padding="same"))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(512))
    #model.add(GlobalAveragePooling1D())
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='sigmoid'))
    return(model)

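# Residual building blocks used by build_res_model:
#   conv_block : Conv1D (ReLU) followed by BatchNormalization
#   res_layer  : two conv_blocks on the main path; with short=True the input branch is
#                average-pooled to match the main path's stride and passed through a
#                conv_block, so that the two branches can be added element-wise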
def conv_block(x,filters,kernel_size,strides,padding):
    x = Conv1D(filters,kernel_size,strides=strides,padding=padding,activation='relu')(x)
    x = BatchNormalization()(x)
    return(x)


def res_layer(x,filters,kernel_size,strides,short=False):
    x1 = x
    x2 = conv_block(x1,filters,kernel_size,strides=strides,padding="same")
    x2 = conv_block(x2,filters,kernel_size,strides=1,padding="same")
    if(short):
        x1 = AveragePooling1D(pool_size=2,strides=strides,padding="same")(x1)
        x1 = conv_block(x1,filters,kernel_size,strides=1,padding="same")
    y = add([x1,x2])
    return(y)


def build_res_model(input_shape=(12000,12),num_classes=9):
    inp = Input(shape=input_shape)

    x = conv_block(inp,16,16,2,padding="same")
    x = conv_block(x,16,16,2,padding="same")
    x = MaxPooling1D(pool_size=2,strides=2)(x)
    x = res_layer(x,16,8,1,short=False)


    x = res_layer(x,64,8,2,short=True)
    x = res_layer(x,64,8,2,short=True)


    x = res_layer(x,128,8,2,short=True)
    x = res_layer(x,128,8,2,short=True)



    x = res_layer(x,256,8,2,short=True)
    x = res_layer(x,256,8,2,short=True)

    x = Dropout(0.5)(x)

    x = res_layer(x,512,4,2,short=True)
    x = res_layer(x,512,4,2,short=True)

    x = Dropout(0.5)(x)

    x = res_layer(x,1024,4,2,short=True)
    x = res_layer(x,1024,4,1,short=False)

    x = Flatten()(x)

    x = Dropout(0.5)(x)

    y = Dense(num_classes,activation="sigmoid")(x)
    
    model = Model(inp,y)

    return(model)


def my_binary_crossentropy(y_true, y_pred,k=8):
    # weighted binary cross-entropy: positive labels are weighted (k+1)/2 and negative
    # labels (k+1)/(2*k), i.e. a k:1 ratio in favour of positives
    bcy = K.binary_crossentropy(y_true,y_pred)
    loss = bcy *(1+y_true * (k-1))*(k+1)/(2*k)
    losses = K.mean(loss,axis=-1)

    return(losses)

def multi_acc(y_true, y_pred,t=0.5):
    # exact-match accuracy: 1 only when all thresholded labels agree with the ground truth
    return K.equal(K.mean(K.equal((y_true>=t),(y_pred>=t)),axis=-1),1)


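# train: runs KF (5) folds independently. For each fold a fresh residual model is built,
# compiled with the weighted loss, and trained for `eps` epochs with Adam; the weights with
# the best val_multi_acc are checkpointed to model_kf_<fold>.h5 and the best score of each
# fold is collected in `scores`.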
def train(eps):
    scores = []
    for kf in range(0,KF):

        model = build_res_model()
        #print(model.summary())

        opt = Adam(0.001,decay=0.001)
        
        #model.load_weights('model'+'_kf_'+str(kf)+'.h5')

        model.compile(loss=my_binary_crossentropy,
                    optimizer=opt, metrics=['binary_accuracy',multi_acc])

        train_iter = xs_gen(train=True,KF=kf)
        val_iter = xs_gen(train=False,KF=kf)

        

        ckpt = keras.callbacks.ModelCheckpoint(
            filepath='model'+'_kf_'+str(kf)+'.h5',
            monitor="val_multi_acc",mode="max",save_best_only=True,verbose=1)

        print("Kfold ",kf+1)

        H = model.fit_generator(
            generator=train_iter,
            steps_per_epoch=math.ceil(train_len/BZ),
            epochs=0+eps,
            initial_epoch=0,
            validation_data = val_iter,
            validation_steps =math.ceil((Lens-train_len)/BZ),
            callbacks=[ckpt],
        )
        scores.append(np.max(H.history["val_multi_acc"]))
        del model

        print(scores,np.mean(scores))
    
    


def app(model_path = "model.02-0.3502-0.869.h5"):
    # quick sanity check on one small validation batch; checkpoints saved with the custom
    # loss/metric need custom_objects to be deserialized
    model = load_model(model_path,
                       custom_objects={"my_binary_crossentropy": my_binary_crossentropy,
                                       "multi_acc": multi_acc})
    val_iter = xs_gen(batch_size=10,train=False)
    for x,y in val_iter:
        break
    ret = model.evaluate(x,y)
    print(ret)
    print(y)
    

if __name__ == "__main__":
    
    """create_csv(ref_path,my_ref_path)
    read_csv(my_ref_path,train_ref_path)"""
    train(192)
    #app()
    #count()
    pass
    "[0.78230768900651193, 0.78384614907778227, 0.77999999889960658, 0.78461538369839012, 0.77769230879270113] 0.781692305895"

2. Prediction code, which averages the outputs of the K-fold models (model-average ensembling).
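
Model averaging here simply means averaging the per-fold sigmoid outputs and thresholding the result at 0.5 to recover the label indices, as done in main() below. A minimal sketch of the idea (fold_probs is a hypothetical stand-in for the per-model predictions):

import numpy as np

# hypothetical sigmoid outputs of the five fold models for a single record (9 labels each)
fold_probs = np.random.rand(5, 9)

avg = fold_probs.mean(axis=0)                          # average the fold predictions
labels = [i for i, p in enumerate(avg) if p >= 0.5]    # threshold and keep the label indices
print(labels)

The full prediction script: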

import sys
import os
import numpy as np
#import scipy.io as sio
import random
from decimal import Decimal
import argparse
import csv

from glob import glob
import pandas as pd
from scipy.io import loadmat
import math
from matplotlib import pyplot as plt
from keras.layers import *
from keras.models import *
from keras.optimizers import *
import keras
from keras import backend as K
from tqdm import tqdm

# Usage: python rematch_challenge.py --test_path <test_file_path>

base_path = "/media/jdcloud/"
train_base_dir = base_path+"Train/"
val_base_dir = base_path+"Val"


def arg_parse():
    """
    Parse arguments

    """
    parser = argparse.ArgumentParser(description='Rematch test of ECG Contest')
    parser.add_argument("--test_path", dest='test_path', help=
                        "the file path of Test Data",
                        default=val_base_dir, type=str)

    # Pass the test data path via the --test_path argument.
    # For convenience when testing with local data, you can point `default` at your local test set path.


    return parser.parse_args()


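# Same feature extraction as in the training script, except that wav_file is passed with its
# ".mat" extension already attached, so no suffix is appended to the path here.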
def get_feature(wav_file,BASE_DIR=train_base_dir,Lens=12000,pred=False,train=False):

    file_path = BASE_DIR+wav_file

    mat = loadmat(file_path)

    dat = np.array([mat[s][0] for s in ["I","II","III","aVR","aVL","aVF","V1","V2","V3","V4","V5","V6"]])

    
    while(len(dat[0])<=Lens+10):
        dat=np.concatenate([dat,dat],1)

    if(not pred):

        if(train):

            sub_len = len(dat[0])-Lens
            rand = np.random.randint(0,high =2)
            if(rand==0):
                start_index = 0
            else:
                start_index = sub_len

        else:
            start_index = 0
    
        dat = dat[:,start_index:start_index+Lens]

        dat = dat.transpose([1,0])
        feature = dat


    else:
        sub_len = len(dat[0])-Lens
        dat1 = dat[:,0:Lens]
        dat2 = dat[:,sub_len:]
        dat1 = dat1.transpose([1,0])
        dat2 = dat2.transpose([1,0])

        feature = np.array([dat1,dat2])

    sex = mat["sex"][0]
    age = mat["age"][0][0]

    
    return(feature)




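# my_binary_crossentropy and multi_acc are only needed so that load_model can deserialize the
# training checkpoints (via custom_objects); they are not used for inference itself, so the
# slight difference from the training-script formulation does not affect the predictions.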
def my_binary_crossentropy(y_true, y_pred,k=8):
    bcy = K.binary_crossentropy(y_true,y_pred)
    loss = bcy + bcy * y_true * (k-1)
    losses = K.mean(loss,axis=-1)/k

    return(losses)

def multi_acc(y_true, y_pred,t=0.5):

    return K.equal((K.mean(K.equal((y_true>=t),(y_pred>=t)),axis=-1)),1)

def get_index(y_pred,t=0.5):
    # indices of the labels whose (averaged) probability reaches the threshold t
    indexs = []
    for i,y in enumerate(y_pred):
        if(y>=t):
            indexs.append(i)
    return(indexs)

    

def main():

    args = arg_parse()
    test_path = args.test_path
    print(test_path)


    ## Add your code to classify normal and disease classes.


    
    # the five fold checkpoints are deserialized with the custom loss/metric and
    # then ensembled by averaging their sigmoid outputs
    custom_objects = {"my_binary_crossentropy": my_binary_crossentropy,
                      "multi_acc": multi_acc}

    model1 = load_model("./model/model_kf_0.h5", custom_objects=custom_objects)
    model2 = load_model("./model/model_kf_1.h5", custom_objects=custom_objects)
    model3 = load_model("./model/model_kf_2.h5", custom_objects=custom_objects)
    model4 = load_model("./model/model_kf_3.h5", custom_objects=custom_objects)
    model5 = load_model("./model/model_kf_4.h5", custom_objects=custom_objects)

    model3.summary()


    ##  Classify the samples of the test set and write the results into answers.csv,
    ##  with each row holding the file name followed by the predicted label indices.
    Data_list = os.listdir(test_path)

    with open('answers.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['File_name', 'label1', 'label2', 'label3', 'label4', 'label5', 'label6', 'label7', 'label8'])
        count = 0
        for file_name in tqdm(Data_list):

            if file_name.endswith('.mat'):
                count += 1

                record_name = file_name[:-4]  # drop the ".mat" suffix (str.strip removes a character set, not a suffix)
                answer = []
                answer.append(record_name)
                feature = get_feature(file_name,BASE_DIR=test_path+"/",pred=False)

                # with pred=True get_feature returns two windows (first/last Lens samples)
                # whose predictions are averaged; with pred=False a single window is used
                # and only the else branch below runs
                if(len(feature)==2):

                    #feature = np.expand_dims(feature,0)
                    rets = model1.predict(feature)
                    ret1 = (rets[0] + rets[1])/2

                    rets = model2.predict(feature)
                    ret2 = (rets[0] + rets[1])/2

                    rets = model3.predict(feature)
                    ret3 = (rets[0] + rets[1])/2

                    rets = model4.predict(feature)
                    ret4 = (rets[0] + rets[1])/2

                    rets = model5.predict(feature)
                    ret5 = (rets[0] + rets[1])/2

                    
                else:
                    feature = np.expand_dims(feature,axis=0)
                    rets = model1.predict(feature)
                    ret1 = rets[0]

                    rets = model2.predict(feature)
                    ret2 = rets[0]

                    rets = model3.predict(feature)
                    ret3 = rets[0]

                    rets = model4.predict(feature)
                    ret4 = rets[0]

                    rets = model5.predict(feature)
                    ret5 = rets[0]



                # average the fold predictions (note: as written, model5's output is
                # computed above but not included in this average)
                ret = (ret4+ret3+ret2+ret1)/4

                ret_indexs = get_index(ret)


                for index in ret_indexs:

                    answer.append(index)

                for x in range(9-len(ret_indexs)):
                    answer.append('')

                writer.writerow(answer)
            #print(count)
        print("ok")


if __name__ == "__main__":
    main()

 
