This walkthrough uses the keras_bert library, which can be installed directly with pip.
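For reference, the package name on PyPI is keras-bert:

pip install keras-bert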
1. Parameter settings
#! -*- coding:utf-8 -*-
import re, os, json, codecs, gc
import numpy as np
import pandas as pd
from random import choice
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold
from keras_bert import load_trained_model_from_checkpoint, Tokenizer
from keras.utils import to_categorical
from keras.layers import *
from keras.callbacks import *
from keras.models import Model
import keras.backend as K
from keras.metrics import top_k_categorical_accuracy
from keras.optimizers import Adam
maxlen = 512            # maximum token length fed to BERT
batch_size = 2
drop_out_rate = 0.2     # used in build_bert() below
learning_rate = 1e-5    # used in build_bert() below
epochs = 5              # used in run_cv() below
train_data_path = 'C:/Users/jkx/Desktop/一些比赛/网民情绪分析/nCoV_100k_train.labled.csv'
test_data_path = 'C:/Users/jkx/Desktop/一些比赛/网民情绪分析/nCov_10k_test.csv'
config_path = 'D:/bert/chinese_wwm_ext_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'D:/bert/chinese_wwm_ext_L-12_H-768_A-12/bert_model.ckpt'
dict_path = 'D:/bert/chinese_wwm_ext_L-12_H-768_A-12/vocab.txt'
2. Reading the data
train_data = pd.read_csv(train_data_path)
test_data = pd.read_csv(test_data_path)
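As an optional sanity check, you can confirm that the two columns used in the next step exist and eyeball a few rows:

# optional: inspect the columns that the preprocessing step relies on
print(train_data.shape, test_data.shape)
print(train_data[['微博中文内容', '情感倾向']].head())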
3. Preprocessing the data
train_x = train_data['微博中文内容']
# remap label -1 (negative) to 2 so the classes are {0, 1, 2}
train_y = train_data['情感倾向'].apply(lambda x: x if x != -1 else 2)
Train = []
Test = []
for i, j in zip(train_x, train_y):
    Train.append((i, to_categorical(j, 3)))
for i in test_data['微博中文内容']:
    Test.append((i, to_categorical(0, 3)))  # dummy labels for the unlabeled test set
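To make the remapping concrete, the raw labels -1/0/1 become classes 2/0/1, and to_categorical turns each class into a 3-way one-hot vector:

print(to_categorical(2, 3))  # [0. 0. 1.]  original label -1
print(to_categorical(0, 3))  # [1. 0. 0.]  original label 0
print(to_categorical(1, 3))  # [0. 1. 0.]  original label 1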
4. Defining the tokenizer
token_dict = {}
with codecs.open(dict_path, 'r', 'utf8') as reader:
    for line in reader:
        token = line.strip()
        token_dict[token] = len(token_dict)
class OurTokenizer(Tokenizer):
    def _tokenize(self, text):
        R = []
        for c in text:
            if c in self._token_dict:
                R.append(c)
            elif self._is_space(c):
                R.append('[unused1]')  # map whitespace to the untrained [unused1] token
            else:
                R.append('[UNK]')  # map all remaining characters to [UNK]
        return R
tokenizer = OurTokenizer(token_dict)
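A quick check of what OurTokenizer changes (the sample sentence is illustrative; the exact ids depend on vocab.txt): keras_bert's encode() returns a list of token ids and a list of segment ids, and here the space survives as [unused1] instead of being dropped:

ids, segments = tokenizer.encode(first='今天 天气不错')
print(ids)       # token ids for [CLS] 今 天 [unused1] 天 气 不 错 [SEP]
print(segments)  # all zeros, since there is only one sentence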
5. Padding and batch-generation functions
def seq_padding(X, padding=0):
    L = [len(x) for x in X]
    ML = max(L)
    return np.array([
        np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x for x in X
    ])
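seq_padding right-pads every sequence to the longest one in the batch, for example:

print(seq_padding([[1, 2], [3, 4, 5]]))
# [[1 2 0]
#  [3 4 5]]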
class data_generator:
    def __init__(self, data, batch_size=batch_size, shuffle=True):
        self.data = data
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.steps = len(self.data) // self.batch_size
        if len(self.data) % self.batch_size != 0:
            self.steps += 1

    def __len__(self):
        return self.steps

    def __iter__(self):
        while True:
            idxs = list(range(len(self.data)))
            if self.shuffle:
                np.random.shuffle(idxs)
            X1, X2, Y = [], [], []
            for i in idxs:
                d = self.data[i]
                text = d[0][:maxlen]
                x1, x2 = tokenizer.encode(first=text)
                y = d[1]
                X1.append(x1)
                X2.append(x2)
                Y.append([y])
                if len(X1) == self.batch_size or i == idxs[-1]:
                    X1 = seq_padding(X1)
                    X2 = seq_padding(X2)
                    Y = seq_padding(Y)
                    yield [X1, X2], Y[:, 0, :]
                    X1, X2, Y = [], [], []
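To verify the generator's output shapes, you can pull a single batch from a toy dataset (the two sample texts are illustrative):

toy = [('今天天气不错', to_categorical(1, 3)), ('出门散步', to_categorical(0, 3))]
[x1, x2], y = next(iter(data_generator(toy, batch_size=2)))
print(x1.shape, x2.shape, y.shape)  # (2, seq_len), (2, seq_len), (2, 3)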
6. The BERT model
def acc_top2(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=2)

def build_bert(nclass):
    bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path)
    for l in bert_model.layers:
        l.trainable = True
    x1_in = Input(shape=(None,))
    x2_in = Input(shape=(None,))
    x = bert_model([x1_in, x2_in])
    x = Lambda(lambda x: x[:, 0])(x)  # take the [CLS] vector as the sentence representation
    x = Dropout(drop_out_rate)(x)     # wire in the drop_out_rate defined in the settings
    p = Dense(nclass, activation='softmax')(x)
    model = Model([x1_in, x2_in], p)
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(learning_rate),
                  metrics=['accuracy', acc_top2])
    model.summary()
    return model
7. Model training
def run_cv(nfold, data, data_label, data_test):
    kf = KFold(n_splits=nfold, shuffle=True, random_state=520).split(data)
    train_model_pred = np.zeros((len(data), 3))
    test_model_pred = np.zeros((len(data_test), 3))
    for i, (train_fold, test_fold) in enumerate(kf):
        X_train, X_valid = data[train_fold, :], data[test_fold, :]
        model = build_bert(3)
        early_stopping = EarlyStopping(monitor='val_acc', patience=3)
        plateau = ReduceLROnPlateau(monitor="val_acc", verbose=1, mode='max', factor=0.5, patience=2)
        checkpoint = ModelCheckpoint('./bert_dump/' + str(i) + '.hdf5', monitor='val_acc',
                                     verbose=2, save_best_only=True, mode='max', save_weights_only=True)
        train_D = data_generator(X_train, shuffle=True)
        # shuffle must stay off here so the out-of-fold predictions below
        # line up with the original row order of test_fold
        valid_D = data_generator(X_valid, shuffle=False)
        test_D = data_generator(data_test, shuffle=False)
        model.fit_generator(
            train_D.__iter__(),
            steps_per_epoch=len(train_D),
            epochs=epochs,
            validation_data=valid_D.__iter__(),
            validation_steps=len(valid_D),
            callbacks=[early_stopping, plateau, checkpoint],
        )
        # model.load_weights('./bert_dump/' + str(i) + '.hdf5')
        # return model
        train_model_pred[test_fold, :] = model.predict_generator(valid_D.__iter__(), steps=len(valid_D), verbose=1)
        test_model_pred += model.predict_generator(test_D.__iter__(), steps=len(test_D), verbose=1)
        del model; gc.collect()
        K.clear_session()
        # break
    return train_model_pred, test_model_pred
train_model_pred, test_model_pred = run_cv(10, np.array(Train), None, np.array(Test))
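Since the per-fold test predictions are summed, the predicted class is simply the argmax over the three columns; remember to map class 2 back to the original label -1 before building a submission (the exact submission file format depends on the competition and is not shown here):

pred_labels = np.argmax(test_model_pred, axis=1)
pred_labels = np.where(pred_labels == 2, -1, pred_labels)  # undo the earlier -1 -> 2 remap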
The load_trained_model_from_checkpoint() function loads the officially released pre-trained model. Its parameters are as follows (an example call follows the list):
- config_file: path to the JSON config file
- checkpoint_file: path to the checkpoint file
- training: if True, returns the full model including the MLM and NSP heads; otherwise those parts are dropped. Defaults to False
- trainable: whether the model's weights are trainable; defaults to the value of training
- output_layer_num: how many of the final FeedForward-Norm layers' outputs are concatenated into a single output. Only available when training is False. Defaults to 1
- seq_len: if this value is smaller than the length in the config file, the position embeddings are sliced down to fit. Defaults to 1e9
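As an illustration (the keyword arguments below exist in keras_bert; the specific values are just one sensible choice for this task):

bert = load_trained_model_from_checkpoint(
    config_path,
    checkpoint_path,
    training=False,   # drop the MLM and NSP heads
    trainable=True,   # fine-tune all layers
    seq_len=maxlen,   # slice the position embeddings down to 512
)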
With BERT as the base, you can stack other networks on top of its output to implement the task at hand, as in the sketch below.
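For instance, a minimal sketch of one common variant, feeding BERT's full sequence output through a BiLSTM instead of using only the [CLS] vector (the layer sizes are illustrative, not from the original post):

bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path)
for l in bert_model.layers:
    l.trainable = True
x1_in = Input(shape=(None,))
x2_in = Input(shape=(None,))
x = bert_model([x1_in, x2_in])    # shape: (batch, seq_len, 768)
x = Bidirectional(LSTM(128))(x)   # summarize the token sequence
p = Dense(3, activation='softmax')(x)
model = Model([x1_in, x2_in], p)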