Traceback (most recent call last):
  File "D:/pythonProject1/lstmcnn/lstmcnn.py", line 178, in <module>
    output_ = model(x_input)
  File "D:\Anaconda\lib\site-packages\torch\nn\modules\module.py", line 1051, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:/pythonProject1/lstmcnn/lstmcnn.py", line 105, in forward
    x = self.conv1(x)  # 32, 50, 178
  File "D:\Anaconda\lib\site-packages\torch\nn\modules\module.py", line 1051, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\Anaconda\lib\site-packages\torch\nn\modules\conv.py", line 298, in forward
    return self._conv_forward(input, self.weight, self.bias)
  File "D:\Anaconda\lib\site-packages\torch\nn\modules\conv.py", line 294, in _conv_forward
    return F.conv1d(input, weight, bias, self.stride,
RuntimeError: Expected 3-dimensional input for 3-dimensional weight [50, 20, 3], but got 4-dimensional input of size [180, 100, 32, 20] instead
How should I change the input dimensions?
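For reference, nn.Conv1d expects a 3-D input shaped (batch, in_channels, length), and its weight is (out_channels, in_channels, kernel_size), which is the [50, 20, 3] in the message; the traceback shows a 4-D tensor reaching conv1 instead. A minimal shape check with made-up sizes (illustration only, not the actual model):

import torch
import torch.nn as nn

conv = nn.Conv1d(in_channels=20, out_channels=50, kernel_size=3)  # weight shape: [50, 20, 3]
ok = torch.randn(32, 20, 180)         # (batch, in_channels, length): the shape Conv1d accepts
print(conv(ok).shape)                 # torch.Size([32, 50, 178])
bad = torch.randn(180, 100, 32, 20)   # a 4-D tensor like the one in the traceback
# conv(bad)                           # raises: Expected 3-dimensional input for 3-dimensional weight ...

The model definition that triggers the error: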
# Define the network structure
class LSTM_CNN(nn.Module):
    def __init__(self, vocab_size, hidden_dim, num_layers, embedding_dim, output_dim):
        super(LSTM_CNN, self).__init__()
        self.hidden_dim = hidden_dim    # hidden state size
        self.num_layers = num_layers    # number of LSTM layers
        # Embedding layer: maps every token to a dense vector of size embedding_dim,
        # so each token is represented by this vector instead of its index
        self.embeddings = nn.Embedding(vocab_size + 1, embedding_dim)
        # LSTM layer: the first argument is the feature size per time step (the embedding size),
        # the second is the hidden state size,
        # the third is the number of LSTM layers
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers)
        self.conv1 = nn.Conv1d(embedding_dim, 50, 3)
        self.maxpool1 = nn.AdaptiveAvgPool1d(output_size=100)
        self.conv2 = nn.Conv1d(50, 100, 3)
        self.maxpool2 = nn.AdaptiveAvgPool1d(output_size=50)
        self.fc = nn.Linear(50 * 100, output_dim)

    def forward(self, x):
        # 1. Build the embedding vectors
        embeds = self.embeddings(x)
        # 2. Feed the embeddings into the LSTM layer
        output, (h_n, c_n) = self.lstm(embeds)
        # timestep, batch_size, hidden_dim = output.shape
        # output = output.reshape(-1, hidden_dim)
        x = self.embeddings(output)  # 32, 180, 16  (batch, seq_len, embedding_dim)
        x = x.transpose(1, 2)        # 32, 16, 180  (batch, embedding_dim, seq_len)
        x = self.conv1(x)            # 32, 50, 178
        x = self.maxpool1(x)         # 32, 50, 100
        x = self.conv2(x)            # 32, 100, 1
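A possible fix (a minimal sketch, assuming the intent is to run the 1-D convolutions over the LSTM's output sequence): drop the second self.embeddings(output) call, which is what turns the 3-D LSTM output into a 4-D tensor, feed the LSTM output straight to conv1, and give conv1 in_channels=hidden_dim rather than embedding_dim, since after the LSTM the channel dimension is hidden_dim. With batch_first=True the shapes also match the (batch, seq_len, ...) comments:

import torch
import torch.nn as nn

class LSTM_CNN(nn.Module):
    def __init__(self, vocab_size, hidden_dim, num_layers, embedding_dim, output_dim):
        super().__init__()
        self.embeddings = nn.Embedding(vocab_size + 1, embedding_dim)
        # batch_first=True: the LSTM consumes and returns (batch, seq_len, features)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers, batch_first=True)
        # in_channels must equal hidden_dim because the conv runs on the LSTM output
        self.conv1 = nn.Conv1d(hidden_dim, 50, 3)
        self.maxpool1 = nn.AdaptiveAvgPool1d(output_size=100)
        self.conv2 = nn.Conv1d(50, 100, 3)
        self.maxpool2 = nn.AdaptiveAvgPool1d(output_size=50)
        self.fc = nn.Linear(50 * 100, output_dim)

    def forward(self, x):                       # x: (batch, seq_len) token indices
        embeds = self.embeddings(x)             # (batch, seq_len, embedding_dim)
        output, (h_n, c_n) = self.lstm(embeds)  # (batch, seq_len, hidden_dim)
        x = output.transpose(1, 2)              # (batch, hidden_dim, seq_len), 3-D as Conv1d expects
        x = self.conv1(x)                       # (batch, 50, seq_len - 2)
        x = self.maxpool1(x)                    # (batch, 50, 100)
        x = self.conv2(x)                       # (batch, 100, 98)
        x = self.maxpool2(x)                    # (batch, 100, 50)
        x = x.flatten(1)                        # (batch, 100 * 50), matches nn.Linear(50 * 100, ...)
        return self.fc(x)                       # (batch, output_dim)

# Quick smoke test with sizes guessed from the traceback (batch 32, seq_len 180, embedding_dim 20);
# vocab_size, hidden_dim, num_layers and output_dim here are placeholders
model = LSTM_CNN(vocab_size=5000, hidden_dim=100, num_layers=1, embedding_dim=20, output_dim=2)
x_input = torch.randint(0, 5001, (32, 180))
print(model(x_input).shape)   # torch.Size([32, 2])

If keeping the default batch_first=False is preferred, the equivalent rearrangement would be output.permute(1, 2, 0), which also yields (batch, hidden_dim, seq_len) before the convolution.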