Matching real-estate chat Q&A pairs with Keras + BERT

I'm a competition rookie, so here is a Keras BERT baseline.
Competition: https://www.datafountain.cn/competitions/474
Code: https://github.com/WhiteGive-Boy/ccf_beike
The code follows Su Jianlin's (苏剑林) guide to using BERT with Keras; see his blog post: https://kexue.fm/archives/6736

The Beike Q&A matching data is given in the following form (the original post shows a screenshot here):
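Schematically, with illustrative rows (not actual competition data):

train.query.tsv   (columns: id, q1 — the customer's question)
0	房子的采光怎么样?
train.reply.tsv   (columns: id, id_sub, q2, label — label=1 if the reply answers the question)
0	0	朝南的,采光很好	1
0	1	好的	0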
Pretrained Chinese BERT download: https://github.com/google-research/bert

import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
import tensorflow as tf
from keras.layers import *
from keras.models import Model
import keras.backend as K
from keras.optimizers import Adam
from random import choice
from keras_bert import load_trained_model_from_checkpoint, Tokenizer
import re, os
import codecs
from keras.callbacks import Callback
# Read and merge the train/test data
train_left = pd.read_csv('./train/train.query.tsv',sep='\t',header=None)
train_left.columns=['id','q1']
train_right = pd.read_csv('./train/train.reply.tsv',sep='\t',header=None)
train_right.columns=['id','id_sub','q2','label']
df_train = train_left.merge(train_right, how='left')
df_train['q2'] = df_train['q2'].fillna('好的')  # fill missing replies with a placeholder ('好的' ≈ 'OK')
test_left = pd.read_csv('./test/test.query.tsv',sep='\t',header=None, encoding='gbk')
test_left.columns = ['id','q1']
test_right =  pd.read_csv('./test/test.reply.tsv',sep='\t',header=None, encoding='gbk')
test_right.columns=['id','id_sub','q2']
df_test = test_left.merge(test_right, how='left')

PATH = './'
BERT_PATH = './'
WEIGHT_PATH = './'
MAX_SEQUENCE_LENGTH = 100
input_categories = ['q1','q2']
output_categories = 'label'


maxlen = 100  # max sequence length
learning_rate = 5e-5  # learning rate (the compile call below hardcodes 1e-5)
min_learning_rate = 1e-5  # not used below
# paths to the pretrained model
config_path = '../chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = '../chinese_L-12_H-768_A-12/bert_model.ckpt'
dict_path = '../chinese_L-12_H-768_A-12/vocab.txt'
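# Note: after unzipping Google's chinese_L-12_H-768_A-12 release, the directory
# should contain:
#   bert_config.json
#   bert_model.ckpt.data-00000-of-00001
#   bert_model.ckpt.index
#   bert_model.ckpt.meta
#   vocab.txt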
token_dict = {}

# Build the token -> id dictionary from vocab.txt
with codecs.open(dict_path, 'r', 'utf8') as reader:
    for line in reader:
        token = line.strip()
        token_dict[token] = len(token_dict)
# Override keras_bert's Tokenizer so that spaces map to [unused1]
class OurTokenizer(Tokenizer):
    def _tokenize(self, text):
        R = []
        for c in text:
            if c in self._token_dict:
                R.append(c)
            elif self._is_space(c):
                R.append('[unused1]') # represent whitespace with the untrained [unused1] token
            else:
                R.append('[UNK]') # everything else becomes [UNK]
        return R
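# Quick sanity check of the custom tokenizer (illustrative; keras_bert's
# tokenize() wraps the characters with [CLS]/[SEP]):
#   t = OurTokenizer(token_dict)
#   t.tokenize(u'今天 天气')
#   -> ['[CLS]', '今', '天', '[unused1]', '天', '气', '[SEP]']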
# Pad each sequence in a batch with `padding` up to the batch's max length
def seq_padding(X, padding=0):
    L = [len(x) for x in X]
    ML = max(L)
    return np.array([
        np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x for x in X
    ])
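# e.g. seq_padding([[1, 2, 3], [4, 5]])
# -> array([[1, 2, 3],
#           [4, 5, 0]])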
# Batch data generator
class data_generator:
    def __init__(self, data, batch_size=32):
        self.data = data
        self.batch_size = batch_size
        self.steps = len(self.data) // self.batch_size
        if len(self.data) % self.batch_size != 0:
            self.steps += 1
    def __len__(self):
        return self.steps
    def __iter__(self):
        while True:
            idxs = list(range(len(self.data)))
            np.random.shuffle(idxs)  # shuffle the sample order in place
            X1, X2, Y = [], [], []
            for i in idxs:
                d = self.data[i]
                text = d[0][:maxlen]  # first sequence (q1), truncated to maxlen
                text2 = d[1][:maxlen]  # second sequence (q2), truncated to maxlen
                x1, x2 = tokenizer.encode(first=text, second=text2)  # token ids and segment ids
                y = d[2]  # label
                X1.append(x1)
                X2.append(x2)
                Y.append([y])
                if len(X1) == self.batch_size or i == idxs[-1]:
                    X1 = seq_padding(X1)
                    X2 = seq_padding(X2)
                    Y = seq_padding(Y)
                    yield [X1, X2], Y
                    X1, X2, Y = [], [], []


tokenizer = OurTokenizer(token_dict)
# Split the data into train/validation at 9:1
data = df_train[['q1','q2','label']].to_numpy()
random_order = list(range(len(data)))
np.random.shuffle(random_order)
train_data = [data[j] for i, j in enumerate(random_order) if i % 10 != 0]
valid_data = [data[j] for i, j in enumerate(random_order) if i % 10 == 0]


# Load the pretrained BERT model
bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)

# fine-tune all BERT layers
for l in bert_model.layers:
    l.trainable = True

x1_in = Input(shape=(None,))
x2_in = Input(shape=(None,))

x = bert_model([x1_in, x2_in])
x = Lambda(lambda x: x[:, 0])(x) # take the first position, the [CLS] vector, as the pair representation
p = Dense(1, activation='sigmoid')(x) # sigmoid output layer for binary classification

model = Model([x1_in, x2_in], p)
model.compile(
    loss='binary_crossentropy', # binary_crossentropy matches the sigmoid output
    optimizer=Adam(1e-5), # a sufficiently small learning rate for fine-tuning
    metrics=['accuracy']
)
model.summary()




train_D = data_generator(train_data)
valid_D = data_generator(valid_data)

model.fit_generator(
    train_D.__iter__(),
    steps_per_epoch=len(train_D),
    epochs=5,
    validation_data=valid_D.__iter__(),
    validation_steps=len(valid_D)
)
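The competition metric is F1, but the loop above only tracks accuracy. Below is a minimal sketch of a validation-F1 callback, assuming sklearn is available and a fixed 0.5 decision threshold; the baseline imports Callback but never wires anything up, so treat this as optional:

from sklearn.metrics import f1_score

class Evaluate(Callback):
    """Sketch: report validation F1 after each epoch (assumes a 0.5 threshold)."""
    def __init__(self, val_data):
        super(Evaluate, self).__init__()
        self.val_data = val_data
    def on_epoch_end(self, epoch, logs=None):
        y_true, y_pred = [], []
        for d in self.val_data:
            t1, t2 = tokenizer.encode(first=d[0][:maxlen], second=d[1][:maxlen])
            p = self.model.predict([np.array([t1]), np.array([t2])])[0][0]
            y_pred.append(1 if p >= 0.5 else 0)
            y_true.append(int(d[2]))
        print('epoch %d  val F1: %.4f' % (epoch + 1, f1_score(y_true, y_pred)))

# e.g. pass callbacks=[Evaluate(valid_data)] to model.fit_generator above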
testdata = df_test[['q1','q2']].to_numpy()
# Predict a matching probability for each test pair
def makeresult(testdata):
    result = []
    for test in tqdm(testdata):
        _t1, _t2 = tokenizer.encode(first=test[0][:maxlen], second=test[1][:maxlen])
        _t1, _t2 = np.array([_t1]), np.array([_t2])
        prob = model.predict([_t1, _t2])
        result.append(float(prob[0][0]))  # keep the scalar probability
    return result
result = makeresult(testdata)
df_test['label'] = result
df_test = df_test[['id','id_sub','label']]
df_test.to_csv("result.csv", index=False)

# Threshold the probabilities at 0.5 for the final 0/1 submission
result = pd.read_csv('./result.csv')
result['newlabel'] = result['label'].apply(lambda x: 1 if float(x) >= 0.5 else 0)
result = result[['id','id_sub','newlabel']]
result.to_csv("newresult.tsv", sep='\t', header=None, index=False)

Fine-tuning the pretrained model for 5 epochs gives a final F1 score of about 0.75.
