# Roughly follows the given hints; not difficult — for reference only.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, MaxPool1D, LSTM, BatchNormalization, Dropout, Flatten, Reshape
# Build a VGG-style 1-D CNN followed by an LSTM and a small dense
# regression head. The layer sequence is identical to the original
# hand-written version; the five convolutional blocks are generated
# from a spec list instead of being written out one by one.
model = Sequential()
######### Begin########
# Conv blocks as (number of Conv1D layers, channel count) pairs:
# blocks 1-2 use two convs, blocks 3-5 use three (VGG-16 layout).
_BLOCK_SPECS = [(2, 64), (2, 128), (3, 256), (3, 512), (3, 512)]

_is_first_layer = True
for _n_convs, _channels in _BLOCK_SPECS:
    for _ in range(_n_convs):
        if _is_first_layer:
            # Only the very first conv declares the input shape.
            model.add(Conv1D(_channels, 3, activation="relu",
                             input_shape=(1, 1), padding="same"))
            _is_first_layer = False
        else:
            model.add(Conv1D(_channels, 3, activation="relu", padding="same"))
    model.add(BatchNormalization(trainable=True))
    # NOTE(review): pool_size=2 with strides=1 and padding='same' keeps the
    # sequence length unchanged, so this pool never downsamples — presumably
    # intentional given the length-1 input; confirm with the exercise spec.
    model.add(MaxPool1D(pool_size=2, strides=1, padding='same'))

# Recurrent block: one LSTM over the conv feature sequence.
model.add(LSTM(512))
model.add(BatchNormalization(trainable=True))

# Fully connected head ending in a single linear regression output.
model.add(Dense(512, activation='relu'))
model.add(Dense(128, activation='tanh'))
model.add(Dense(1, activation='linear'))
########End########
model.summary()