我设法用一个小技巧找到了这个问题的解决办法。我创建了一个常量张量来保存变量的内容。张量的大小会限制 sen_len 的大小,但只要我们把它选得足够大,就不成问题。
# Toy example of the workaround: a fixed-size zero "container" tensor absorbs
# the variable-length sentence lengths, which lets us split the embeddings
# even though the number of sentences is only known at feed time.

# Maximum number of sentences the container can hold. This caps the size of
# `sen_len`, but choosing it large enough makes that a non-issue.
# (Was a magic 512 repeated three times below.)
MAX_SENTENCES = 512

# These play the role of word embeddings: 10 "words" of dimensionality 2,
# grouped as sentences of lengths 2, 1, 3 and 4.
embeds_raw = tf.constant(np.array([
    [1, 1],
    [1, 1],
    [2, 2],
    [3, 3],
    [3, 3],
    [3, 3],
    [4, 4],
    [4, 4],
    [4, 4],
    [4, 4],
], dtype='float32'))
embeds = tf.Variable(initial_value=embeds_raw)

# The fixed-size container. Zeros are chosen because they are neutral to the
# addition below (and a zero split size yields an empty, harmless slice).
container_variable = tf.zeros([MAX_SENTENCES], dtype=tf.int32, name='container_variable')

# Placeholder for per-sentence lengths. NOTE(review): the fed lengths must sum
# to the number of rows in `embeds` for the split below to succeed.
sen_len = tf.placeholder('int32', shape=[None], name='sen_len')

# Length of the longest sentence — the target width every sentence is padded to.
max_l = tf.reduce_max(sen_len)

# Number of sentences actually fed.
nbr_sentences = tf.shape(sen_len)[0]

# Pad the fed lengths with zeros up to the container size, then add them into
# the (all-zero) container so every one of the MAX_SENTENCES slots has a size.
padded_sen_len = tf.pad(sen_len, [[0, MAX_SENTENCES - nbr_sentences]], 'CONSTANT')
added_container_variable = tf.add(container_variable, padded_sen_len)

# TensorArray receiving one (possibly empty) slice per slot; clear_after_read
# is disabled so the while-loop below may read entries freely.
u1 = tf.TensorArray(dtype=tf.float32, size=MAX_SENTENCES, clear_after_read=False)
u1 = u1.split(embeds, added_container_variable)

# Loop variables: a running index and the accumulator of concatenated, padded
# sentences. The accumulator is seeded with a single zero row (stripped later).
i = tf.constant(0, shape=(), dtype='int32', name='i')
x = tf.constant(0, shape=[1, 2], dtype=tf.float32)
def condition(_i, _):
    """Loop guard: keep iterating while the index is below the sentence count."""
    return _i < nbr_sentences
def body(_i, _x):
    """Pad sentence ``_i`` to the longest length and append it to accumulator ``_x``."""
    pad_spec = [[0, max_l - sen_len[_i]], [0, 0]]
    padded_sentence = tf.pad(u1.read(_i), pad_spec, 'CONSTANT')
    return _i + 1, tf.concat([_x, padded_sentence], 0)
# Drive the padding loop. The accumulator's row count grows dynamically,
# hence the relaxed [None, 2] shape invariant on the second loop variable.
_, accumulated = tf.while_loop(
    condition,
    body,
    [i, x],
    shape_invariants=[tf.TensorShape([]), tf.TensorShape([None, 2])]
)
# Drop the all-zero seed row, then fold the flat result into
# [sentences, max_length, embedding_dim] (embedding dim is 2 in this toy).
reshaped_elements = tf.reshape(accumulated[1:], [nbr_sentences, max_l, 2])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Lengths must sum to the 10 rows of the embedding matrix.
    print(sess.run(reshaped_elements, feed_dict={sen_len: [2, 1, 3, 4]}))