import ast
import os
import random

from torch.utils.data import DataLoader

from sentence_transformers import SentenceTransformer, SentencesDataset
from sentence_transformers import InputExample, evaluation, losses
# Restrict PyTorch to GPUs 0 and 1; must be set before CUDA is first initialized.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
# Load a pretrained multilingual sentence-embedding model from a local path.
model = SentenceTransformer('../pre_model/distiluse-base-multilingual-cased')
# Load the dataset.
# Each line of the file holds a tuple literal like: ('气性坏疽', '产气荚膜杆菌感染', 1.0)
def load_data(filename):
    """Read one ``(text_a, text_b, score)`` tuple literal per line from *filename*.

    Blank lines and lines whose literal is ``None`` are skipped. The collected
    records are shuffled in place before being returned.

    :param filename: path to a UTF-8 text file of Python tuple literals.
    :return: shuffled list of parsed tuples.
    :raises ValueError: if a non-empty line is not a valid Python literal.
    """
    datas = []
    with open(filename, encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                # eval('') would raise SyntaxError; just skip empty lines.
                continue
            # ast.literal_eval only parses literals — unlike eval(), it cannot
            # execute arbitrary code embedded in the data file.
            record = ast.literal_eval(line)
            if record is None:
                continue
            datas.append(record)
    random.shuffle(datas)
    return datas
# ---- Load train / eval data ----
datas = load_data('../data/train_data3.txt')
print('train_datas len:', len(datas))
eval_datas = load_data('../data/eval_data3.txt')
print('eval_datas len:', len(eval_datas))

# Build the training examples: each record is a pair of texts plus a float
# similarity label (expected in [0, 1] for CosineSimilarityLoss).
train_datas = [InputExample(texts=[rec[0], rec[1]], label=float(rec[2]))
               for rec in datas]

# Build the evaluation data as three parallel lists, as required by
# EmbeddingSimilarityEvaluator.
sentences1, sentences2, scores = [], [], []
for rec in eval_datas:
    sentences1.append(rec[0])
    sentences2.append(rec[1])
    scores.append(float(rec[2]))
evaluator = evaluation.EmbeddingSimilarityEvaluator(sentences1, sentences2, scores)

train_dataset = SentencesDataset(train_datas, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=128)

# Similarity task: labels are fractional similarity scores.
train_loss = losses.CosineSimilarityLoss(model)
# Classification task alternative: labels are integers.
# train_loss = losses.SoftmaxLoss(model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=3)

# Fine-tune; the evaluator runs every 300 steps and the best checkpoint is
# written to output_path.
model.fit(train_objectives=[(train_dataloader, train_loss)],
          epochs=10,
          warmup_steps=100,
          show_progress_bar=True,
          evaluator=evaluator,
          evaluation_steps=300,
          output_path='./my_albert_similarity_model2')
model.evaluate(evaluator)
# Fine-tuning with sentence-transformers.
# (Source article last published 2023-07-21 18:54:01.)