Competition Task
Opinion classification on highly colloquial text that demands strong background knowledge
No. | Post | Comment | Label |
---|---|---|---|
1 | By the morning close, turnover was around 4 billion yuan, which means today is clearly a higher-volume move than Friday. If it closes at the limit-up, then today's high-level shakeout, with strong disagreement converging into consensus, would be impressive, and the expectation for tomorrow is a limit-up on shrinking volume. Watch and trade as it goes. If the stock gets sold down here instead of being pushed up, the goal of distribution won't be achieved, because that tends to scare off the arbitrage money, the panic sellers, and the stop-loss crowd; the ideal script is to sell while pushing the price up, the most comfortable way... If the limit-up breaks, it should still close up; then watch for weak-to-strong tomorrow. If it closes down, things look shaky. Today's open was fine, and opening high and running higher is fine too; after that it comes down to whether disagreement can turn into consensus, a test of the market's combined conviction. | You have one year of trading experience; where does the confidence to call exact price levels come from? | 攻击用户 (attack) |
2 | Let me lay out the logic; correct me if I get it wrong. The bidding should have closed last night, but there has been no news at all, and the price spiked and fell back today. In theory the big players learn the result before us retail investors, and once the winning bid is announced they should be buying aggressively, yet institutional money flowed out all day. Don't tell me the flow data is unreliable; check the Dragon-Tiger list from the 21st. So if there is no sign of a rally before the afternoon close, better to get out first. | Took a look at your profile: two months of trading and you catch limit-ups like that! You're a deity, not a mortal! | 未攻击用户 (no attack) |
baseline
Go all in with BERT.
requirements
tensorflow
keras
keras-bert
tensorflow-addons
pandas
numpy
scikit-learn
# keras 2.7 with tensorflow 2.5? Pick whatever versions match your CUDA install.
import pandas as pd
import codecs, gc
import numpy as np
from sklearn.model_selection import KFold
from keras_bert import load_trained_model_from_checkpoint, Tokenizer
# import tensorflow.keras as keras
from tensorflow.keras.metrics import top_k_categorical_accuracy,Recall
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import *
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
import tensorflow_addons as tfa
import tensorflow as tf
label_dict = {'未攻击用户':0,'攻击用户':1}
train_df = pd.read_table('./train/train.txt')
train_df['label'] = train_df['label'].apply(lambda x:label_dict[x])
train_df['token'] = train_df['context'] + train_df['query']
test_df = pd.read_table('./test_A/test_data_A.txt')
test_df['token'] = test_df['context'] + test_df['query']
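# Assumed layout of the input files (illustrative; verify against the real data):
# tab-separated with a header row, the training file carrying an extra label column:
#   context<TAB>query<TAB>label
#   主贴内容...<TAB>评论内容...<TAB>攻击用户
# 'token' simply concatenates post and comment into a single input string.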
# file_path = r'D:\ziyong\chinese_roberta_wwm_large_ext_L-24_H-1024_A-16/'
file_path = r'D:\bisai\chinese_rbtl3_L-3_H-1024_A-16/'
config_path = file_path+'bert_config_rbtl3.json'
checkpoint_path = file_path+'bert_model.ckpt'
dict_path = file_path+'vocab.txt'
maxlen=120
token_dict = {}
with codecs.open(dict_path, 'r', 'utf8') as reader:
    for line in reader:
        token = line.strip()
        token_dict[token] = len(token_dict)  # vocab token -> sequential id
# Custom tokenizer: strict per-character tokenization so token positions stay aligned with the input text
class OurTokenizer(Tokenizer):
    def _tokenize(self, text):
        R = []
        for c in text:
            if c in self._token_dict:
                R.append(c)
            elif self._is_space(c):
                R.append('[unused1]')  # represent whitespace-like characters with [unused1]
            else:
                R.append('[UNK]')  # characters missing from the vocab become [UNK]
        return R
tokenizer = OurTokenizer(token_dict)
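# Quick sanity check of the custom tokenizer (the sample text is made up):
# whitespace maps to [unused1], characters missing from the vocab map to [UNK].
demo_ids, demo_segs = tokenizer.encode(first='你股龄 一年?')
print(demo_ids[:8], demo_segs[:8])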
def seq_padding(X, padding=0):
    # Right-pad every sequence in the batch to the length of the longest one.
    L = [len(x) for x in X]
    ML = max(L)
    return np.array([
        np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x for x in X
    ])
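# Worked example: seq_padding right-pads to the longest sequence in the batch,
# so [[1, 2], [3, 4, 5]] becomes [[1, 2, 0], [3, 4, 5]].
print(seq_padding([[1, 2], [3, 4, 5]]))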
class data_generator:
    # Infinite generator yielding ([token_ids, segment_ids], one_hot_labels) batches;
    # Keras consumes a fixed number of steps per epoch.
    def __init__(self, data, batch_size=32, shuffle=True):
        self.data = data
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.steps = len(self.data) // self.batch_size
        if len(self.data) % self.batch_size != 0:
            self.steps += 1

    def __len__(self):
        return self.steps

    def __iter__(self):
        while True:
            idxs = list(range(len(self.data)))
            if self.shuffle:
                np.random.shuffle(idxs)
            X1, X2, Y = [], [], []
            for i in idxs:
                d = self.data[i]
                text = d[0][:maxlen]  # truncate to maxlen characters
                x1, x2 = tokenizer.encode(first=text)
                y = d[1]
                X1.append(x1)
                X2.append(x2)
                Y.append([y])
                if len(X1) == self.batch_size or i == idxs[-1]:
                    X1 = seq_padding(X1)
                    X2 = seq_padding(X2)
                    Y = seq_padding(Y)
                    yield [X1, X2], Y[:, 0, :]
                    X1, X2, Y = [], [], []
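# Illustrative smoke test: pull a single batch and check the shapes.
# X1/X2 are padded token/segment ids, Y the one-hot labels; the sample text is made up.
demo_gen = data_generator([('主贴加评论的拼接文本', to_categorical(1, 2))] * 2, batch_size=2)
[demo_x1, demo_x2], demo_y = next(iter(demo_gen))
print(demo_x1.shape, demo_x2.shape, demo_y.shape)  # (2, seq_len) (2, seq_len) (2, 2)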
# Top-k accuracy: a prediction counts as correct when the true class is among the k highest-scoring classes.
def acc_top2(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=2)
def micro_f1_tfaddon(y_true, y_pred, num_class=2):
    # Standalone micro-F1 helper built on tensorflow-addons; argmax turns
    # probabilities into one-hot predictions before scoring.
    metric = tfa.metrics.F1Score(num_classes=num_class, average='micro')
    y_pred = tf.one_hot(tf.argmax(y_pred, 1), depth=num_class)
    metric.update_state(y_true, y_pred)
    result = metric.result()
    return result.numpy()
    # return result
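# Illustrative check with dummy tensors: both argmax predictions match the
# labels, so the micro-F1 should print as 1.0.
print(micro_f1_tfaddon(tf.constant([[1., 0.], [0., 1.]]),
                       tf.constant([[0.9, 0.1], [0.4, 0.6]])))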
# BERT model definition
def build_bert(nclass):
    bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)  # load the pretrained weights
    for l in bert_model.layers:
        l.trainable = True  # fine-tune every BERT layer
    x1_in = Input(shape=(None,))
    x2_in = Input(shape=(None,))
    x = bert_model([x1_in, x2_in])
    x = Lambda(lambda x: x[:, 0])(x)  # take the [CLS] vector for classification
    x = Dense(1000, activation='relu')(x)
    p = Dense(nclass, activation='softmax')(x)
    model = Model([x1_in, x2_in], p)
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(1e-5),  # keep the learning rate small for fine-tuning
                  metrics=['accuracy', tfa.metrics.F1Score(num_classes=2, average='micro')])
    print(model.summary())
    return model
# Convert the training/test rows and labels into the model's input format
DATA_LIST = []
for data_row in train_df.iloc[:].itertuples():
    DATA_LIST.append((data_row.token, to_categorical(data_row.label, 2)))
DATA_LIST = np.array(DATA_LIST, dtype=object)  # object dtype: rows are (text, one-hot) pairs
DATA_LIST_TEST = []
for data_row in test_df.iloc[:].itertuples():
    DATA_LIST_TEST.append((data_row.token, to_categorical(0, 2)))  # placeholder labels for the test set
DATA_LIST_TEST = np.array(DATA_LIST_TEST, dtype=object)
# Cross-validated training and test-set prediction
def run_cv(nfold, data, data_labels, data_test):
    kf = KFold(n_splits=nfold, shuffle=True, random_state=7017).split(data)
    train_model_pred = np.zeros((len(data), 2))
    test_model_pred = np.zeros((len(data_test), 2))
    for i, (train_fold, test_fold) in enumerate(kf):
        X_train, X_valid = data[train_fold, :], data[test_fold, :]
        model = build_bert(2)
        early_stopping = EarlyStopping(monitor='val_accuracy', patience=3)  # early stopping to curb overfitting
        plateau = ReduceLROnPlateau(monitor="val_accuracy", verbose=1, mode='max', factor=0.5, patience=2)  # halve the learning rate when the metric stops improving
        checkpoint = ModelCheckpoint('./bert_dump/' + str(i) + '{epoch:02d}-{val_accuracy:.5f}-{val_f1_score:.5f}.hdf5',
                                     monitor='val_accuracy', verbose=2, save_best_only=True, mode='max',
                                     save_weights_only=True)  # keep only the best weights per fold
        train_D = data_generator(X_train, shuffle=True)
        valid_D = data_generator(X_valid, shuffle=False)  # no shuffling: out-of-fold predictions must stay aligned with test_fold
        test_D = data_generator(data_test, shuffle=False)
        # train the model; fit_generator is deprecated in TF2 (model.fit accepts generators) but still works here
        model.fit_generator(
            train_D.__iter__(),
            steps_per_epoch=len(train_D),
            epochs=5,
            validation_data=valid_D.__iter__(),
            validation_steps=len(valid_D),
            callbacks=[early_stopping, plateau, checkpoint],
        )
        # model.load_weights('./bert_dump/' + str(i) + '.hdf5')
        # return model
        train_model_pred[test_fold, :] = model.predict_generator(valid_D.__iter__(), steps=len(valid_D), verbose=1)
        test_model_pred += model.predict_generator(test_D.__iter__(), steps=len(test_D), verbose=1)
        del model
        gc.collect()  # release memory between folds
        K.clear_session()  # drop the TF graph/session state as well
        # break
    return train_model_pred, test_model_pred
# n-fold cross-validation
train_model_pred, test_model_pred = run_cv(5, DATA_LIST, None, DATA_LIST_TEST)
test_pred = [np.argmax(x) for x in test_model_pred]  # summed fold probabilities -> predicted class ids
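# The script stops at raw class ids. A hedged sketch for writing them out;
# the file name, the column, and mapping back to the original label strings
# are assumptions, so check the competition's required submission format.
inv_label_dict = {v: k for k, v in label_dict.items()}
submission = test_df.copy()
submission['label'] = [inv_label_dict[p] for p in test_pred]
submission[['label']].to_csv('submission.csv', index=False)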
No feature engineering was used here, even though some features are quite telling: for example, an indicator for profanity such as 逼 or 死 should lift the score. A minimal sketch of such a flag follows.
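The sketch assumes a hand-picked word list; the list below is an illustrative placeholder, not taken from the data.

```python
# Binary profanity indicator on the comment text; extend the word list
# from your own inspection of the data.
PROFANITY = ['逼', '死']
for df in (train_df, test_df):
    df['has_profanity'] = df['query'].apply(
        lambda s: int(any(w in str(s) for w in PROFANITY)))
```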
Tree models can be used for ensembling: https://mp.weixin.qq.com/s/S7CVNzeV8hME6qFA4sq8LA (a blending sketch follows).
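One way to apply that idea here, sketched under assumptions: GradientBoostingClassifier stands in for whatever tree model the linked article uses, and the features are just the out-of-fold BERT probabilities plus the profanity flag from the sketch above.

```python
# Hedged stacking sketch: BERT fold outputs plus one hand-crafted feature
# feed a tree model that produces the final predictions.
from sklearn.ensemble import GradientBoostingClassifier

train_feats = np.hstack([train_model_pred,
                         train_df[['has_profanity']].values])
test_feats = np.hstack([test_model_pred / 5,  # average the 5 summed folds
                        test_df[['has_profanity']].values])
gbm = GradientBoostingClassifier(random_state=7017)
gbm.fit(train_feats, train_df['label'])
final_pred = gbm.predict(test_feats)
```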