BERT-tensorflow Example Code: dense100

A one-page walkthrough of the code.

First install the library (the code below uses TF1 APIs such as tf.placeholder and tf.Session, so it requires TensorFlow 1.x):

pip install bert-tensorflow

Pretrained weights download link:

https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip

import numpy as np
import tensorflow as tf
from bert import modeling, tokenization

# paths into the unzipped checkpoint directory
config_path = './bert/chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = './bert/chinese_L-12_H-768_A-12/bert_model.ckpt'
dict_path = './bert/chinese_L-12_H-768_A-12/vocab.txt'

bert_config = modeling.BertConfig.from_json_file(config_path)
tokenizer = tokenization.FullTokenizer(vocab_file=dict_path, do_lower_case=False)

testText = '我想听周杰伦的稻香'  # "I want to listen to Jay Chou's 'Dao Xiang'"

tokens = tokenizer.tokenize(testText)
tokens = ['[CLS]'] + tokens + ['[SEP]']              # BERT expects [CLS] ... [SEP]
input_ids = tokenizer.convert_tokens_to_ids(tokens)  # map tokens to vocab ids

print(tokens)
print(input_ids)

input_mask = [1] * len(input_ids)   # 1 marks real tokens, 0 will mark padding
segment_ids = [0] * len(input_ids)  # single-sentence input: all segment 0

print(input_mask)

maxLen = 128
# zero-pad all three sequences to the fixed length
while len(input_ids) < maxLen:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)

assert len(input_ids) == maxLen
assert len(segment_ids) == maxLen
assert len(input_mask) == maxLen

# add a batch dimension: each array becomes shape (1, maxLen)
input_ids = np.reshape(np.array(input_ids), (1, -1))
input_mask = np.reshape(np.array(input_mask), (1, -1))
segment_ids = np.reshape(np.array(segment_ids), (1, -1))
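
The preprocessing above can be folded into one reusable helper (a minimal sketch; encode_text is a name introduced here, not part of bert-tensorflow):

def encode_text(text, tokenizer, max_len=128):
    # tokenize, add special tokens, truncate, zero-pad, and add a batch dimension
    tokens = ['[CLS]'] + tokenizer.tokenize(text)[:max_len - 2] + ['[SEP]']
    ids = tokenizer.convert_tokens_to_ids(tokens)
    pad = [0] * (max_len - len(ids))
    mask = [1] * len(ids) + pad
    return (np.array([ids + pad]), np.array([mask]), np.array([[0] * max_len]))

input_ids, input_mask, segment_ids = encode_text(testText, tokenizer)  # same arrays as above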


# placeholders with dynamic batch size and sequence length
input_ids_p = tf.placeholder(shape=[None, None], dtype=tf.int32, name="input_ids_p")
input_mask_p = tf.placeholder(shape=[None, None], dtype=tf.int32, name="input_mask_p")
segment_ids_p = tf.placeholder(shape=[None, None], dtype=tf.int32, name="segment_ids_p")

model = modeling.BertModel(config=bert_config,
                           is_training=False,
                           input_ids=input_ids_p,
                           input_mask=input_mask_p,
                           token_type_ids=segment_ids_p,
                           use_one_hot_embeddings=False)
output = model.get_sequence_output()                              # (batch, seq_len, 768) token vectors
dense_to_100 = tf.layers.dense(output, 100, name='dense_to_100')  # project each token to 100 dims
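
If a single sentence-level vector is wanted instead of per-token vectors, BertModel also exposes get_pooled_output() (the [CLS] position passed through a dense+tanh layer). A sketch of projecting it the same way; sent_to_100 is a name introduced here:

pooled = model.get_pooled_output()                              # shape (batch, 768)
sent_to_100 = tf.layers.dense(pooled, 100, name='sent_to_100')  # shape (batch, 100)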


tvars = tf.trainable_variables()
# map graph variables to their counterparts in the pretrained checkpoint;
# the new dense layer has no counterpart there and keeps its random initialization
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, checkpoint_path)
tf.train.init_from_checkpoint(checkpoint_path, assignment_map)

with tf.Session() as sess:
    # init_from_checkpoint rewired the initializers, so this also loads the BERT weights
    sess.run(tf.global_variables_initializer())
    feed = {
        input_ids_p: input_ids,
        input_mask_p: input_mask,
        segment_ids_p: segment_ids,
    }
    print(sess.run(output, feed_dict=feed))                  # (1, 128, 768) token vectors
    print(sess.run(tf.shape(dense_to_100), feed_dict=feed))  # prints [1 128 100]
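
To keep the combined graph (BERT weights plus the new dense layer) for later use, a standard TF1 Saver works. A minimal sketch; ./bert_dense100/model.ckpt is an illustrative path:

import os
os.makedirs('./bert_dense100', exist_ok=True)
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())     # loads the BERT weights via the mapping above
    saver.save(sess, './bert_dense100/model.ckpt')  # writes checkpoint files to disk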

 

Below is a Keras-based BERT-BiLSTM-Attention code example:

```python
import tensorflow as tf
from tensorflow.keras.layers import (Input, Dense, Dropout, LSTM, Bidirectional,
                                     TimeDistributed, Concatenate, Attention)
from transformers import TFBertModel

# model inputs
input_ids = Input(shape=(max_seq_length,), dtype=tf.int32, name='input_ids')
attention_mask = Input(shape=(max_seq_length,), dtype=tf.int32, name='attention_mask')

# load pretrained BERT
bert_model = TFBertModel.from_pretrained('bert-base-chinese')

# BERT encoding layer
output = bert_model(input_ids, attention_mask=attention_mask)[0]

# BiLSTM layer
output = Bidirectional(LSTM(128, return_sequences=True))(output)

# Attention layer (self-attention over the BiLSTM outputs)
attention = Attention()([output, output])

# concatenate the BiLSTM and Attention outputs
output = Concatenate()([output, attention])

# Dropout layer
output = Dropout(0.5)(output)

# per-token classification head
output = TimeDistributed(Dense(num_tags, activation='softmax'))(output)

# define the model
model = tf.keras.models.Model(inputs=[input_ids, attention_mask], outputs=output)

# compile the model; the head already applies softmax, so from_logits must be False
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-5)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
```

Here `max_seq_length` is the maximum input sequence length and `num_tags` is the number of labels. The `transformers` library loads the pretrained BERT model, Keras layers build the BiLSTM and Attention stages, and Keras's `Model` class ties the whole model together. It is compiled with the Adam optimizer, sparse categorical cross-entropy loss, and sparse categorical accuracy as the evaluation metric.
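
A minimal usage sketch for the Keras model above (the tokenizer call and the dummy data are illustrative, not from the original example; max_seq_length and num_tags must be defined before the model is built):

```python
import numpy as np
from transformers import BertTokenizer

max_seq_length = 128
num_tags = 10  # illustrative label count

tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
texts = ['我想听周杰伦的稻香']  # illustrative training text
enc = tokenizer(texts, padding='max_length', truncation=True,
                max_length=max_seq_length, return_tensors='np')
labels = np.zeros((len(texts), max_seq_length), dtype='int32')  # dummy per-token labels

model.fit({'input_ids': enc['input_ids'], 'attention_mask': enc['attention_mask']},
          labels, epochs=1, batch_size=8)
```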