import torch
from torch.autograd import Variable
from torch import nn
# --- Toy demo: padding + packing variable-length sequences for an RNN ---
batch_size = 4    # number of sequences in the batch
max_length = 3    # length of the longest sequence (pad target)
hidden_size = 2   # RNN hidden-state size
n_layers = 1      # number of stacked RNN layers
feature_dim = 1   # feature size per time step

# Padded container: (batch, time, feature); zeros act as padding.
batch_in = torch.zeros((batch_size, max_length, feature_dim))

# Per-sequence data, reshaped to (max_length, feature_dim) so each one
# matches the slot batch_in[i]. The original (1, 3) row vectors do not
# broadcast into a (3, 1) slot and raise a RuntimeError on PyTorch >= 0.4;
# the old code relied on the legacy numel-based copy. Trailing zeros are padding.
vec_1 = torch.FloatTensor([1, 2, 3]).view(max_length, feature_dim)
vec_2 = torch.FloatTensor([1, 2, 0]).view(max_length, feature_dim)
vec_3 = torch.FloatTensor([1, 0, 0]).view(max_length, feature_dim)
vec_4 = torch.FloatTensor([2, 0, 0]).view(max_length, feature_dim)
batch_in[0] = vec_1
batch_in[1] = vec_2
batch_in[2] = vec_3
batch_in[3] = vec_4
# NOTE: the former `batch_in = Variable(batch_in)` wrapper was dropped --
# Variable has been a no-op alias for Tensor since PyTorch 0.4.
print(batch_in.size())

# True (unpadded) length of each sequence, in decreasing order as
# pack_padded_sequence requires (unless enforce_sorted=False is passed).
seq_lengths = [3, 2, 1, 1]

# Pack the batch: padded positions are dropped, so the RNN never
# performs computation on padding elements.
pack = torch.nn.utils.rnn.pack_padded_sequence(batch_in, seq_lengths, batch_first=True)
print(pack)

rnn = nn.RNN(feature_dim, hidden_size, n_layers, batch_first=True)
h0 = V
pytorch rnn输入 一个batch内的序列pad到同一长度
最新推荐文章于 2024-08-09 15:25:17 发布
在PyTorch中训练RNN模型时,由于一个batch内的序列长度不一,需要使用pad操作统一长度。通过pack_padded_sequence和pad_packed_sequence确保序列正确输入和输出。RNN不会对填充元素进行计算,避免了无效运算。动态RNN或使用packed input可以更好地处理这种情况,避免对padding进行不必要的计算。
摘要由CSDN通过智能技术生成