class Embedding(Layer):
"""Turns positive integers (indexes) into dense vectors of fixed size.
eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
This layer can only be used as the first layer in a model.
# Example
```python
# Input: 1000 text samples over a 1000-word vocabulary; each sample is a
# length-10 sequence of vocabulary indices, e.g. [1, 10, 2, 18, ...].
# Output: a 1000 x 10 x 64 tensor — 1000 samples, each consisting of
# 10 words with a 64-dimensional embedding vector per word.
model = Sequential()
model.add(Embedding(1000, 64, input_length=10))
# the model will take as input an integer matrix of size (batch, input_length).
# the largest integer (i.e. word index) in the input should be
# no larger than 999 (vocabulary size).
# now model.output_shape == (None, 10, 64), where None is the batch dimension.
input_array = np.random.randint(1000, size=(32, 10))
model.compile('rmsprop', 'mse')
output_array = model.predict(input_array)
assert output_array.shape == (32, 10, 64)
```
# Arguments
input_dim: int > 0. Size of the vocabulary,
i.e. maximum integer index + 1.
output_dim: int >= 0. Dimension of the dense embedding.
embeddings_initializer: Initializer for the `embeddings` matrix
(see [initializers](../initializers.md)).
embeddings_regularizer: Regularizer function applied to
the `embeddings` matrix
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
embeddings_constraint: Constraint function applied to
the `embeddings` matrix
(see [constraints](../constraints.md)).
mask_zero: Whether or not the input value 0 is a special "padding"
value that should be masked out.
This is useful when using [recurrent layers](recurrent.md)
which may take variable length input.
If this is `True` then all subsequent layers
in the model need to support masking or an exception will be raised.
If mask_zero is set to True, as a consequence, index 0 cannot be
used in the vocabulary (input_dim should equal size of
vocabulary + 1).
input_length: Length of input sequences, when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
# Input shape
2D tensor with shape: `(batch_size, sequence_length)`.
# Output shape
3D tensor with shape: `(batch_size, sequence_length, output_dim)`.
# References
- [A Theoretically Grounded Application of Dropout in
Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
"""
@interfaces.legacy_embedding_support
def __init__(self, input_dim, output_dim,
             embeddings_initializer='uniform',
             embeddings_regularizer=None,
             activity_regularizer=None,
             embeddings_constraint=None,
             mask_zero=False,
             input_length=None,
             **kwargs):
    """Store the layer configuration and resolve initializer/regularizer/constraint specs.

    Identifiers given as strings (or serialized configs) are converted to
    concrete objects through the Keras `get` helpers, so either form is
    accepted by callers.
    """
    # Derive a static input shape only when the caller did not pass one
    # explicitly; a falsy input_length (None or 0) means the sequence
    # length is unknown at build time.
    kwargs.setdefault('input_shape',
                      (input_length,) if input_length else (None,))
    super(Embedding, self).__init__(**kwargs)
    self.input_dim = input_dim
    self.output_dim = output_dim
    # Resolve string/dict identifiers into callable objects.
    self.embeddings_initializer = initializers.get(embeddings_initializer)
    self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.embeddings_constraint = constraints.get(embeddings_constraint)
    self.mask_zero = mask_zero
    # Masking support is enabled exactly when index 0 is treated as padding.
    self.supports_masking = mask_zero
    self.input_length = input_length
# (removed: stray web-page footer text from the page this source was copied
# from — it was not part of the original module and is a syntax error.)