ResNet-DenseNet Architecture with ResU Blocks (TensorFlow version)
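
The code below builds a 1-D "ResU-Dense" network with the Keras functional API: a strided convolutional stem, four U-Net-style residual blocks (ResidualUBlock), two DenseNet-style stages with transition layers, multi-head attention, and a global max pool feeding a linear classification head. A second script then applies the model to multi-label ECG classification.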

import tensorflow as tf
from tensorflow.keras import layers, models
import math

def Bottleneck(x, growthRate):
    """DenseNet bottleneck layer: 1x1 conv to 4*growthRate channels, then a
    3x3 conv down to growthRate channels; the input is concatenated back on."""
    interChannels = 4 * growthRate
    out = layers.BatchNormalization()(x)
    out = layers.ReLU()(out)
    out = layers.Conv1D(interChannels, kernel_size=1, use_bias=False)(out)

    out = layers.BatchNormalization()(out)
    out = layers.ReLU()(out)
    out = layers.Conv1D(growthRate, kernel_size=3, padding='same', use_bias=False)(out)

    out = layers.Concatenate()([x, out])
    return out



def SingleLayer(x, growthRate):
    """Plain DenseNet layer: a single 3x3 conv producing growthRate channels,
    concatenated onto the input."""
    out = layers.BatchNormalization()(x)
    out = layers.ReLU()(out)
    out = layers.Conv1D(growthRate, kernel_size=3, padding='same', use_bias=False)(out)

    out = layers.Concatenate()([x, out])
    return out


def Transition(x, nOutChannels, down=False):
    """Compress channels with a 1x1 conv; optionally halve the sequence length."""
    out = layers.BatchNormalization()(x)
    out = layers.ReLU()(out)
    out = layers.Conv1D(nOutChannels, kernel_size=1, use_bias=False)(out)

    if down:
        out = layers.AveragePooling1D(pool_size=2)(out)

    return out
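
A quick shape check of the three helpers (a sketch; the shapes assume a (batch, length, channels) input and growthRate=12):

inp = layers.Input(shape=(32, 16))
out = Bottleneck(inp, growthRate=12)               # channels: 16 + 12 = 28
out = Transition(out, nOutChannels=14, down=True)  # length halves: (None, 16, 14)
print(out.shape)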


def ResidualUBlock(inputs, out_ch, mid_ch, layers_num, downsampling=True):
    """U-Net-style block: `layers_num` strided encoder convs, a bottleneck
    conv, and `layers_num - 1` transposed decoder convs with skip
    concatenations. The input length must be divisible by 2**layers_num."""
    K = 9  # kernel size

    x = layers.Conv1D(out_ch, kernel_size=K, padding="same", use_bias=False)(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU()(x)

    # Encoder: each strided conv halves the sequence length.
    encoders_out = []
    for idx in range(layers_num):
        encoder_x = layers.Conv1D(mid_ch, kernel_size=K, strides=2, padding="same", use_bias=False)(x)
        encoder_x = layers.BatchNormalization()(encoder_x)
        encoder_x = layers.LeakyReLU()(encoder_x)
        encoders_out.append(encoder_x)
        x = encoder_x

    # Bottleneck at the coarsest resolution.
    x = layers.Conv1D(mid_ch, kernel_size=K, padding="same", use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU()(x)

    # Decoder: each transposed conv doubles the length and the matching
    # encoder features are concatenated back in.
    for idx in range(layers_num - 2, -1, -1):
        decoder_x = layers.Conv1DTranspose(mid_ch if idx != 0 else out_ch, kernel_size=K, strides=2,
                                           padding="same", use_bias=False)(x)
        decoder_x = layers.BatchNormalization()(decoder_x)
        decoder_x = layers.LeakyReLU()(decoder_x)
        x = layers.Concatenate()([decoder_x, encoders_out[idx]])

    # The decoder stops one level above the input resolution, so the block
    # halves the length; the optional pooling halves it again and a 1x1 conv
    # restores out_ch channels.
    if downsampling:
        x = layers.AveragePooling1D(pool_size=2, strides=2)(x)
        x = layers.Conv1D(out_ch, kernel_size=1, use_bias=False)(x)

    return x
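
For example, with layers_num=6 the input length must be divisible by 2**6 = 64; a 1024-sample input leaves the block at a quarter of its length (a sketch):

inp = layers.Input(shape=(1024, 256))
out = ResidualUBlock(inp, out_ch=256, mid_ch=64, layers_num=6)
print(out.shape)  # (None, 256, 256): 1024 -> 512 through the U-path, -> 256 after pooling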


def _make_dense(x, growthRate, nDenseBlocks, bottleneck):
    # Each layer concatenates growthRate new channels, so the stage adds
    # nDenseBlocks * growthRate channels in total.
    for _ in range(nDenseBlocks):
        if bottleneck:
            x = Bottleneck(x, growthRate)
        else:
            x = SingleLayer(x, growthRate)
    return x
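
The channel bookkeeping in ResU_Dense below follows directly from this concatenation: with out_ch=256, growthRate=12 and nDenseBlocks=16,

nChannels = 256 + 16 * 12                  # 448 channels leave the first dense stage
nOutChannels = int(math.floor(448 * 0.5))  # the Transition compresses to 224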


def ResU_Dense(input_shape, nOUT, out_ch=256, mid_ch=64):
    inputs = layers.Input(shape=input_shape)

    # Stem: wide kernel, stride 2.
    x = layers.Conv1D(out_ch, kernel_size=15, padding="same", strides=2, use_bias=False)(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU()(x)

    # Four ResU stages; each quarters the sequence length.
    x = ResidualUBlock(x, out_ch=out_ch, mid_ch=mid_ch, layers_num=6)
    x = ResidualUBlock(x, out_ch=out_ch, mid_ch=mid_ch, layers_num=5)
    x = ResidualUBlock(x, out_ch=out_ch, mid_ch=mid_ch, layers_num=4)
    x = ResidualUBlock(x, out_ch=out_ch, mid_ch=mid_ch, layers_num=3)

    growthRate = 12
    reduction = 0.5
    nDenseBlocks = 16

    # Two DenseNet stages, each followed by a downsampling Transition.
    x = _make_dense(x, growthRate=growthRate, nDenseBlocks=nDenseBlocks, bottleneck=True)
    nChannels = out_ch + nDenseBlocks * growthRate
    nOutChannels = int(math.floor(nChannels * reduction))
    x = Transition(x, nOutChannels, down=True)

    x = _make_dense(x, growthRate=growthRate, nDenseBlocks=nDenseBlocks, bottleneck=True)
    x = Transition(x, out_ch, down=True)

    x = layers.Dropout(0.5)(x)

    # Keras MultiHeadAttention expects (batch, seq, features), so no transposes
    # are needed here, unlike PyTorch's (seq, batch, features) layout.
    x = layers.MultiHeadAttention(num_heads=8, key_dim=out_ch)(x, x, x)

    x = layers.GlobalMaxPooling1D()(x)

    # Linear logits: pair with a from_logits loss; apply sigmoid/softmax at inference.
    x = layers.Dense(nOUT)(x)

    model = models.Model(inputs=inputs, outputs=x)
    return model
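
A quick smoke test of the assembled model (a sketch): the head returns logits, so an explicit activation converts them to probabilities.

demo_model = ResU_Dense(input_shape=(2048, 12), nOUT=10)
logits = demo_model(tf.zeros([1, 2048, 12]))
probs = tf.nn.softmax(logits, axis=-1)  # use tf.sigmoid(...) for multi-label heads
print(probs.shape)  # (1, 10)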


if __name__ == '__main__':
    # Example with randomly generated data.
    import numpy as np

    batch_size = 16
    input_length = 2048
    input_channels = 12
    num_classes = 10

    x_train = np.random.rand(batch_size, input_length, input_channels).astype(np.float32)
    # One-hot labels to match categorical cross-entropy.
    labels = np.random.randint(0, num_classes, size=(batch_size,))
    y_train = tf.keras.utils.to_categorical(labels, num_classes)

    # Create a model instance.
    model = ResU_Dense(input_shape=(input_length, input_channels), nOUT=num_classes)

    # Compile the model; the head outputs logits, so use from_logits=True.
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])

    # Train the model.
    model.fit(x_train, y_train, epochs=10)




Training script: multi-label ECG classification with project-specific helpers

import os
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import re
import sys
sys.path.append('/media/arch/40C65AD0C65AC636/AAA/ECG_Efficientnets')

import pandas as pd
import numpy as np
import tensorflow as tf

import time
from data_helper import data_loader, root
from collections import namedtuple, defaultdict


X_train, X_train_age, X_train_gender, y_train, \
X_dev, X_dev_age, X_dev_gender, y_dev, \
X_test, X_test_age, X_test_gender, y_test = data_loader(is_rebuild_dataset=False)



# X_train=np.expand_dims(X_train,axis=1)
# X_dev=np.expand_dims(X_dev,axis=1)
# X_test=np.expand_dims(X_test,axis=1)


from model.keras_efficientnets.keras_efficientnets.efficientnet import ResU_Dense, EfficientNetB0, EfficientNet_Other, mina
from model.keras_efficientnets.keras_efficientnets.optimize import cost_func_wrapper, optimize_coefficients
from model.keras_efficientnets.keras_efficientnets.config import BlockArgs, get_default_block_list


params = {
    "learning_rate": 0.001,
    "batch_size": 64,
    "input_shape": [1, 5120, 8],
    "num_categories": 55,
    "task_label": "multi_label",
}

# batch_size is taken from params below; only the input geometry is fixed here.
input_length = 5120
input_channels = 8

print(X_train.shape)

model = ResU_Dense(input_shape=(input_length, input_channels), nOUT=params['num_categories'])

# Alternative backbones: EfficientNetB0, EfficientNet_Other, mina (example below).
# model=EfficientNet(input_shape=params['input_shape'],
#                     block_args_list=get_default_block_list(),
#                     width_coefficient=1.0,
#                     depth_coefficient=1.0,
#                     include_top=True,
#                     weights=None,
#                     input_tensor=None,
#                     pooling=None,
#                     classes=params['num_categories'],
#                     dropout_rate=0.2,
#                     drop_connect_rate=0.,
#                     data_format=None,
#                     task_label = params['task_label'],
#                     default_size=224)
model.summary()



from tensorflow import keras
from utils.config import EFFICIENT_NETS_MODEL_SAVE_PATH
from utils.metrics import micro_f1, macro_f1
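
utils.metrics is project-specific and not shown here; a minimal micro-averaged F1 in the same spirit might look like the sketch below (a hypothetical stand-in, not the project's implementation). Since the model's head emits logits, predictions are thresholded at 0, which corresponds to a sigmoid probability of 0.5.

import tensorflow.keras.backend as K

def micro_f1(y_true, y_pred):
    # Logits > 0 correspond to sigmoid probabilities > 0.5.
    y_pred = K.cast(K.greater(y_pred, 0.0), K.floatx())
    tp = K.sum(y_true * y_pred)
    precision = tp / (K.sum(y_pred) + K.epsilon())
    recall = tp / (K.sum(y_true) + K.epsilon())
    return 2 * precision * recall / (precision + recall + K.epsilon())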

# Monitor the validation-set metric rather than the training-set one.
stopping = keras.callbacks.EarlyStopping(patience=8, mode='max', monitor='val_micro_f1')

reduce_lr = keras.callbacks.ReduceLROnPlateau(
    factor=0.1,
    patience=2,
    min_lr=params["learning_rate"] * 0.001)

checkpointer = keras.callbacks.ModelCheckpoint(
    filepath=EFFICIENT_NETS_MODEL_SAVE_PATH,
    save_best_only=False)

batch_size = params.get("batch_size", 64)

from tensorflow.keras.optimizers import Adam

optimizer = Adam(
    learning_rate=params["learning_rate"],  # `lr` is deprecated
    clipnorm=params.get("clipnorm", 1))

# Multi-label task: one sigmoid per class, handled inside the loss via from_logits.
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              optimizer=optimizer,
              metrics=['accuracy', micro_f1, macro_f1])

MAX_EPOCHS = 15


model.fit(X_train, y_train,
            batch_size=batch_size,
            epochs=MAX_EPOCHS,
            validation_data=(X_dev, y_dev),
            callbacks=[ reduce_lr, stopping])


# model.fit(X_train, y_train,
#             batch_size=batch_size,
#             epochs=MAX_EPOCHS,
#             validation_data=(X_dev, y_dev),
#             callbacks=[checkpointer, reduce_lr, stopping])


# Y_pred = model.predict(X_test)
#
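
A hedged sketch of test-time scoring: the head emits logits, so convert them to probabilities and binarize before computing F1.

Y_prob = tf.sigmoid(model.predict(X_test)).numpy()
Y_pred = (Y_prob > 0.5).astype(np.float32)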













