Single sentence classification and sentence pair classification with simpletransformers

References: simpletransformers, simple-transformers-configuration

1. Import the required modules

import warnings
warnings.simplefilter('ignore')

import gc
import os

import numpy as np
import pandas as pd

from sklearn.model_selection import StratifiedKFold

from simpletransformers.classification import ClassificationModel, ClassificationArgs

os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" 
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
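Besides the CUDA_VISIBLE_DEVICES environment variable, the GPU can also be selected per model through the constructor; a minimal sketch (the checkpoint name here is illustrative):

model = ClassificationModel(
    'bert', 'bert-base-chinese',
    use_cuda=True,    # set False to fall back to CPU
    cuda_device=0     # index among the visible devices
)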

2. Read the data and fill missing values


train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')

train['content'].fillna('', inplace=True)   # replace missing text with empty strings
test['content'].fillna('', inplace=True)

3. Configure the model arguments

Each simpletransformers model takes an args object (a ClassificationArgs instance or a plain dict) whose attributes control the hyperparameters.

def get_model_args():
    model_args = ClassificationArgs()
    model_args.max_seq_length = 32               # truncate texts to 32 tokens
    model_args.train_batch_size = 16
    model_args.num_train_epochs = 1              # train for a single epoch
    model_args.sliding_window = True             # split over-long texts into sliding windows
    model_args.evaluate_during_training = True   # evaluate on eval_df during training
    model_args.evaluate_during_training_verbose = True
    model_args.fp16 = False
    model_args.no_save = True                    # do not save the trained model
    model_args.save_steps = -1                   # no step-based checkpoints
    model_args.overwrite_output_dir = True       # overwrite the output directory
    model_args.output_dir = 'outputs/'           # output directory (the default is outputs/)
    return model_args
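Equivalently, the same options can be passed as a plain dict when constructing the model; a minimal sketch:

model_args = {
    'max_seq_length': 32,
    'train_batch_size': 16,
    'num_train_epochs': 1,
}
model = ClassificationModel('bert', 'hfl/chinese-roberta-wwm-ext', args=model_args)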

4. Single sentence classification: training with cross-validation

4.1 Load a standard pretrained model (any standard Hugging Face checkpoint)

model = ClassificationModel(
    "roberta", "roberta-base"
)

4.2 Load a community pretrained model from the Hugging Face Hub

model = ClassificationModel(
    "bert", "KB/bert-base-swedish-cased"
)

4.3 Load a locally saved pretrained model

outputs/best_model is the path where the model was saved locally.

model = ClassificationModel(
    "bert", "outputs/best_model"
)

4.4 Full cross-validation code

oof = []
prediction = test[['id']].copy()   # .copy() avoids a SettingWithCopyWarning below
prediction['bert_pred'] = 0

n_folds = 3
kfold = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=2021)
for fold_id, (trn_idx, val_idx) in enumerate(kfold.split(train, train['label'])):
    train_df = train.iloc[trn_idx][['content', 'label']]
    valid_df = train.iloc[val_idx][['content', 'label']]
    train_df.columns = ['text', 'label']
    valid_df.columns = ['text', 'label']
    
    model_args = get_model_args()
    model = ClassificationModel('bert',
                                'hfl/chinese-roberta-wwm-ext',  # community model pretrained on Chinese text
                                args=model_args)
    model.train_model(train_df, eval_df=valid_df)
    # eval_model returns (result, model_outputs, wrong_predictions):
    #   result            - metrics such as accuracy and F1
    #   model_outputs     - raw logits, i.e. the values before softmax
    #   wrong_predictions - the misclassified examples
    _, valid_outputs, _ = model.eval_model(valid_df)
    
    df_oof = train.iloc[val_idx][['id', 'label']].copy()
    df_oof['bert_pred'] = valid_outputs[:, 1]
    oof.append(df_oof)
    
    print('predict')
    _, test_outputs = model.predict([text for text in test['content']])
    prediction['bert_pred'] += test_outputs[:, 1] / kfold.n_splits
    
    del model, train_df, valid_df, valid_outputs, test_outputs
    gc.collect()
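Note that the class-1 column taken from model_outputs above is a raw logit, not a probability. If calibrated scores are needed, a softmax can be applied first; a minimal sketch using scipy:

from scipy.special import softmax

# convert raw logits of shape (n_samples, n_classes) into the probability of class 1
probs = softmax(valid_outputs, axis=1)[:, 1]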

Model classes for the different tasks:

Task                                                      | Model
Binary and multi-class text classification                | ClassificationModel
Conversational AI (chatbot training)                      | ConvAIModel
Language generation                                       | LanguageGenerationModel
Language model training/fine-tuning                       | LanguageModelingModel
Multi-label text classification                           | MultiLabelClassificationModel
Multi-modal classification (text and image data combined) | MultiModalClassificationModel
Named entity recognition                                  | NERModel
Question answering                                        | QuestionAnsweringModel
Regression                                                | ClassificationModel
Sentence-pair classification                              | ClassificationModel
Text representation generation                            | RepresentationModel
Document retrieval                                        | RetrievalModel

4.5 Write out the predictions

df_oof = pd.concat(oof)
df_oof = df_oof.sort_values(by='id')
print(df_oof.head(10))   # quick sanity check
df_oof[['id', 'bert_pred']].to_csv('roberta_pred_oof.csv', index=False)
prediction[['id', 'bert_pred']].to_csv('roberta_pred_test.csv', index=False)

5. Sentence pair classification: training with cross-validation

get_model_args() is identical to the one defined in Section 3. The only changes for sentence pairs are that the input DataFrame needs three columns named text_a, text_b, and label, and that num_labels is passed explicitly:
oof = []
prediction = test[['id']].copy()
prediction['bert_pred'] = 0

n_folds = 3
kfold = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=2021)
for fold_id, (trn_idx, val_idx) in enumerate(kfold.split(train, train['label'])):
    train_df = train.iloc[trn_idx][['level_4', 'content', 'label']]
    valid_df = train.iloc[val_idx][['level_4', 'content', 'label']]
    train_df.columns = ['text_a', 'text_b', 'label']   # sentence-pair inputs must be named text_a / text_b
    valid_df.columns = ['text_a', 'text_b', 'label']
    
    model_args = get_model_args()
    model = ClassificationModel('bert',
                                'hfl/chinese-roberta-wwm-ext',  # community model pretrained on Chinese text
                                num_labels=2,
                                args=model_args)
    model.train_model(train_df, eval_df=valid_df)
    # eval_model returns (result, model_outputs, wrong_predictions), as in Section 4.4
    _, valid_outputs, _ = model.eval_model(valid_df)
    
    df_oof = train.iloc[val_idx][['id', 'label']].copy()
    df_oof['bert_pred'] = valid_outputs[:, 1]
    oof.append(df_oof)
    
    print('predict')
    _, test_outputs = model.predict([list(text) for text in test[['level_4', 'content']].values])  # each input is a [text_a, text_b] pair
    prediction['bert_pred'] += test_outputs[:, 1] / kfold.n_splits
    
    del model, train_df, valid_df, valid_outputs, test_outputs
    gc.collect()
df_oof = pd.concat(oof)
df_oof = df_oof.sort_values(by='id')
print(df_oof.head(10))   # quick sanity check
df_oof[['id', 'bert_pred']].to_csv('roberta_pred_oof.csv', index=False)
prediction[['id', 'bert_pred']].to_csv('roberta_pred_test.csv', index=False)

6. sentence-transformers

Computing text similarity:

  • use a pretrained model directly to score text similarity
  • fine-tune on the training samples first, then score text similarity

Both approaches are sketched after the imports below.

import numpy as np
import torch
from sentence_transformers import SentenceTransformer, util
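A minimal sketch of both approaches, assuming a recent sentence-transformers version (util.cos_sim); the checkpoint name and the toy training pairs are illustrative:

from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses, util

# 1) Use a pretrained model directly: encode both texts, then take cosine similarity.
model = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2')
emb_a = model.encode('今天天气很好', convert_to_tensor=True)
emb_b = model.encode('今天天气不错', convert_to_tensor=True)
print(util.cos_sim(emb_a, emb_b))   # similarity score in [-1, 1]

# 2) Fine-tune on labelled pairs first, then score the same way.
train_examples = [
    InputExample(texts=['今天天气很好', '今天天气不错'], label=0.9),
    InputExample(texts=['今天天气很好', '股市大跌'], label=0.1),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
train_loss = losses.CosineSimilarityLoss(model)
model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1, warmup_steps=10)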