Goal: compute the BLEU metric for Chinese text, which can be used to evaluate the output quality of large language models against a reference.
# -*- coding: utf-8 -*-
import jieba
import nltk
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

# Segment a sentence with jieba and return its n-grams (used here only
# to inspect which n-grams each side produces).
def sentence_to_ngrams(sentence, n):
    words = jieba.lcut(sentence)
    return set(nltk.ngrams(words, n))

# Compute cumulative BLEU-1 .. BLEU-max_n scores for one sentence pair.
def calculate_bleu(reference, candidate, max_n=4):
    smooth = SmoothingFunction().method4
    # sentence_bleu expects token lists, not precomputed n-grams,
    # so segment once and let NLTK build the n-grams internally.
    ref_tokens = jieba.lcut(reference)
    cand_tokens = jieba.lcut(candidate)
    scores = []
    for n in range(1, max_n + 1):
        print(f"{n}-gram reference: {sentence_to_ngrams(reference, n)}")
        print(f"{n}-gram candidate: {sentence_to_ngrams(candidate, n)}")
        # Uniform weights over orders 1..n yield the cumulative BLEU-n score.
        weights = tuple(1.0 / n for _ in range(n))
        scores.append(sentence_bleu([ref_tokens], cand_tokens,
                                    weights=weights,
                                    smoothing_function=smooth))
    return scores

# Test data
reference = "桌子上有只猫"
candidate = "这个桌子上有个狸花猫"

# Compute the BLEU scores
bleu_scores = calculate_bleu(reference, candidate)

# Print the results
for n, score in enumerate(bleu_scores, start=1):
    print(f'BLEU-{n} Score:', score)
Run results: the script prints the reference and candidate n-grams for each order, followed by the BLEU-1 through BLEU-4 scores (the exact values depend on jieba's segmentation).
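Sentence-level BLEU is noisy on a single pair, so model benchmarks usually report a corpus-level score over a whole test set instead. Below is a minimal sketch using NLTK's corpus_bleu with the same jieba segmentation; the sentence pairs are made-up placeholders, not real evaluation data.

# -*- coding: utf-8 -*-
import jieba
from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction

# Hypothetical test set: one reference per candidate (made-up examples).
pairs = [
    ("桌子上有只猫", "这个桌子上有个狸花猫"),
    ("今天天气很好", "今天的天气真不错"),
]

# corpus_bleu takes a list of reference lists and a list of hypotheses,
# all as token lists, and pools the n-gram counts before scoring.
references = [[jieba.lcut(ref)] for ref, _ in pairs]
hypotheses = [jieba.lcut(cand) for _, cand in pairs]

# Default weights (0.25, 0.25, 0.25, 0.25) give the cumulative BLEU-4 score.
score = corpus_bleu(references, hypotheses,
                    smoothing_function=SmoothingFunction().method4)
print("Corpus BLEU-4:", score)

Pooling the n-gram counts before scoring is what distinguishes corpus_bleu from averaging per-sentence scores, and it matches how BLEU is defined in the original paper.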