import torch
import torch.nn as nn
# Default compute device: first CUDA GPU when available, otherwise the CPU.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
class CLDNN(nn.Module):
    """CLDNN (Convolutional + LSTM + DNN) classifier over 11 classes.

    Expected input: float tensor of shape ``(batch, 4, seq_len)``.
    The height of 4 is fixed by two constraints visible below: conv3
    uses a height-4 kernel (reducing H to 1), and the LSTM's
    ``input_size=20`` equals 16 CNN output channels + the 4 raw input
    rows. ``seq_len`` is free: every conv pads the width by 2 on the
    left and convolves with width-3 kernels, preserving the length.
    """

    def __init__(self):
        super(CLDNN, self).__init__()
        # First hidden layer. ZeroPad2d((2, 0, 0, 0)) pads width by 2 on
        # the left so the width-3 kernel keeps the sequence length.
        self.conv1 = nn.Sequential(
            nn.ZeroPad2d((2, 0, 0, 0)),
            # kernel_size[0] -> H, kernel_size[1] -> W
            nn.Conv2d(1, 128, kernel_size=(1, 3)),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        # Second hidden layer.
        self.conv2 = nn.Sequential(
            nn.ZeroPad2d((2, 0, 0, 0)),
            nn.Conv2d(128, 64, kernel_size=(1, 3)),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        # Third hidden layer: the height-4 kernel collapses H (4 -> 1).
        self.conv3 = nn.Sequential(
            nn.ZeroPad2d((2, 0, 0, 0)),
            nn.Conv2d(64, 32, kernel_size=(4, 3)),
            nn.BatchNorm2d(32),
            nn.ReLU(),
        )
        # Fourth hidden layer.
        self.conv4 = nn.Sequential(
            nn.ZeroPad2d((2, 0, 0, 0)),
            nn.Conv2d(32, 16, kernel_size=(1, 3)),
            nn.BatchNorm2d(16),
            nn.ReLU(),
        )
        # Bidirectional LSTM over the fused (raw + CNN) sequence.
        self.lstm = nn.LSTM(
            input_size=20,       # 16 CNN channels + 4 raw rows; changes with input dims
            hidden_size=100,
            num_layers=1,
            batch_first=True,    # first tensor dimension is the batch
            bidirectional=True,  # doubles the output feature size to 200
        )
        # Fully connected classifier head.
        self.classifier = nn.Sequential(
            nn.Linear(220, 50),  # 200 (2 * hidden_size) + 20 (lstm input_size)
            nn.Dropout(0.5),
            nn.ReLU(),
            nn.Linear(50, 11),
        )

    def forward(self, input):
        # (batch, 4, seq) -> (batch, 1, 4, seq). Move the input to the
        # device the weights live on (rather than a module-level global),
        # so a CPU-resident model no longer crashes on a CUDA machine.
        input = input.unsqueeze(1).to(next(self.parameters()).device)
        # CNN feature extraction.
        conv1 = self.conv1(input)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        cnn_out = self.conv4(conv3)              # (batch, 16, 1, seq)
        # Fuse the raw input with the CNN features and feed the RNN.
        cnn_out = cnn_out.squeeze(2)             # (batch, 16, seq)
        cnn_out = cnn_out.permute(0, 2, 1)       # (batch, seq, 16)
        input = input.squeeze(1)                 # (batch, 4, seq)
        input = input.permute(0, 2, 1)           # (batch, seq, 4)
        combine_input_cnn = torch.cat([input, cnn_out], dim=2)  # (batch, seq, 20)
        # RNN over the fused sequence.
        rnn_out, _ = self.lstm(combine_input_cnn)               # (batch, seq, 200)
        # Fuse CNN/raw features with the RNN output for the DNN head;
        # classify on the last time step only.
        combine_cnn_rnn = torch.cat([combine_input_cnn, rnn_out], dim=2)  # (batch, seq, 220)
        z = self.classifier(combine_cnn_rnn[:, -1, :])
        return z
if __name__ == "__main__":
    # Smoke test. The input height MUST be 4: conv3 applies a height-4
    # kernel, and the LSTM expects 16 CNN channels + 4 raw rows = 20.
    # (The previous height of 2 made conv3 fail at runtime.)
    x = torch.randn(32, 4, 128)
    model = CLDNN()
    out = model(x)
    print(out)
# Adapted from a CSDN blog post on CLDNN (last published 2024-07-13 21:18:24).