Test data

Run the evaluation against the local custom_dialog dataset (first command) or, for comparison, against local arc data (second command):
CUDA_VISIBLE_DEVICES=1 python -m llmuses.run --model qwen/Qwen2-7B-Instruct --template-type qwen --datasets custom_dialog --dataset-hub Local --dataset-args '{"custom_dialog": {"local_path": "/root/eval-scope/data/custom_dialog","subset_list":["basic"]}}' --dataset-dir /root/eval-scope/data/
python llmuses/run.py --model qwen/Qwen2-7B-Instruct --template-type qwen --datasets arc --dataset-hub Local --dataset-args '{"arc": {"local_path": "/root/eval-scope/data/arc"}}' --dataset-dir /root/eval-scope/data/
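The custom_dialog adapter below loads local data from <dataset-dir>/custom_dialog/<subset>/trans_task.json, a JSON list of question/answer pairs. A minimal sketch that writes such a file for the basic subset (the two sample records are placeholders, not real evaluation data):

import json
import os

# Assumed layout: <dataset-dir>/custom_dialog/<subset>/trans_task.json
data_dir = '/root/eval-scope/data/custom_dialog/basic'
os.makedirs(data_dir, exist_ok=True)

samples = [
    # Each record needs a 'question' (Chinese source text) and an 'answer' (English reference translation).
    {'question': '今天天气很好。', 'answer': 'The weather is nice today.'},
    {'question': '我喜欢读书。', 'answer': 'I like reading.'},
]

with open(os.path.join(data_dir, 'trans_task.json'), 'w', encoding='utf-8') as f:
    json.dump(samples, f, ensure_ascii=False, indent=2)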
custom_dialog (directory: llmuses/benchmarks/custom_dialog/)
__init__.py
# Copyright (c) Alibaba, Inc. and its affiliates.
from llmuses.benchmarks.custom_dialog.custom_dialog_adapter import DATASET_ID, SUBSET_LIST, CustomDialogAdapter
from llmuses.benchmarks.custom_dialog.custom_dialog_adapter import CustomDialogAdapter as DataAdapterClass
from llmuses.models.model_adapter import ChatGenerationModelAdapter as ModelAdapterClass # noqa
custom_dialog.py
# Copyright (c) Alibaba, Inc. and its affiliates.
# Copyright (c) Allen Institute, and its affiliates.
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
"""AI2 ARC (Abstraction and Reasoning Corpus) for General Artificial Intelligence Benchmark."""
"""AUTO GENERATED, DO NOT EDIT"""
import json
import os
import datasets
# flake8: noqa
_CITATION = """\
@article{allenai:arc,
author = {Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and
Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
title = {Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
journal = {arXiv:1803.05457v1},
year = {2018},
}
"""
_DESCRIPTION = """\
A new dataset of 7,787 genuine grade-school level, multiple-choice science questions, assembled to encourage research in
advanced question-answering. The dataset is partitioned into a Challenge Set and an Easy Set, where the former contains
only questions answered incorrectly by both a retrieval-based algorithm and a word co-occurrence algorithm. We are also
including a corpus of over 14 million science sentences relevant to the task,
and an implementation of three neural baseline models for this dataset. We pose ARC as a challenge to the community.
ARC-Easy:
train: 2251
test: 2376
validation: 570
ARC-Challenge:
train: 1119
test: 1172
validation: 299
"""
_URL = 'https://modelscope.oss-cn-beijing.aliyuncs.com/open_data/arc/ARC-V1-Feb2018.zip'
# tasks: ['ARC-Easy', 'ARC-Challenge']
class CustomDialogConfig(datasets.BuilderConfig):
    """BuilderConfig for CustomDialog."""

    def __init__(self, **kwargs):
        """BuilderConfig for CustomDialog.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(CustomDialogConfig, self).__init__(version=datasets.Version('1.0.0', ''), **kwargs)
class CustomDialog(datasets.GeneratorBasedBuilder):
    """
    Custom dialog dataset builder, adapted from the AI2 Reasoning Challenge (ARC) template.
    Subset: dialogData.
    """

    VERSION = datasets.Version('1.0.0')
    BUILDER_CONFIGS = [
        CustomDialogConfig(
            name='dialogData',
            description="""\
            Custom dialog data: question / answer pairs used for the custom_dialog benchmark.
            """,
        )
    ]
    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # datasets.features.FeatureConnectors
            features=datasets.Features(
                {
                    'question': datasets.Value('string'),
                    'answer': datasets.Value('string'),
                    # These are the features of your dataset like images, labels ...
                }
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage='https://allenai.org/data/arc',
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # NOTE: the BuilderConfig above does not define a `url` attribute,
        # so download from the module-level _URL instead of `self.config.url`.
        data_dir = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    'filepath': data_dir,
                },
            ),
        ]
    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding='utf-8') as f:
            for row in f:
                data = json.loads(row)
                question = data['question']
                answer = data['answer']
                id_ = data['id']
                yield id_, {
                    'id': id_,
                    'question': question,
                    'answer': answer,
                }
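As written, _generate_examples expects the downloaded file to be newline-delimited JSON, one record per line with id, question and answer fields; a sketch of one such record (placeholder values, not from the original data):

{"id": "sample_0001", "question": "今天天气很好。", "answer": "The weather is nice today."}

Note that with --dataset-hub Local the evaluation goes through CustomDialogAdapter.load_from_disk (below) rather than this builder.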
custom_dialog_adapter.py
# Copyright (c) Alibaba, Inc. and its affiliates.
import os
import json
from collections import defaultdict

from llmuses.benchmarks.data_adapter import DataAdapter
from llmuses.metrics.metrics import bleu, bleu_ngram_one_sample, exact_match, weighted_mean
from llmuses.metrics.rouge_metric import compute_rouge_score_one_sample_zh
from llmuses.utils import ResponseParser, jsonl_to_list, normalize_score
from llmuses.utils.logger import get_logger
# flake8: noqa
logger = get_logger()
DATASET_ID = 'modelscope/custom_dialog'
# task_list = ['ARC-Easy', 'ARC-Challenge']
SUBSET_LIST = ['basic']
class CustomDialogAdapter(DataAdapter):

    def __init__(self,
                 subset_list: list = None,
                 metric_list: list = None,
                 few_shot_num: int = None,
                 train_split: str = None,
                 eval_split: str = 'validation',
                 prompt_template: str = '',
                 **kwargs):

        if subset_list is None:
            subset_list = SUBSET_LIST

        if metric_list is None:
            metric_list = [{'name': 'WeightedAverageAccuracy', 'object': weighted_mean}]

        if few_shot_num is None:
            # Use 0-shot by default
            logger.info('Set 0-shot examples by system for custom_dialog.')
            few_shot_num = 0

        if few_shot_num != 0:
            logger.warning(f'few_shot_num is recommended to be 0 for custom_dialog, got {few_shot_num}.')

        super().__init__(subset_list=subset_list,
                         metric_list=metric_list,
                         few_shot_num=few_shot_num,
                         train_split=train_split,
                         eval_split=eval_split,
                         prompt_template=prompt_template,
                         **kwargs)
    def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
        """
        Load the dataset from local disk.

        dataset_name_or_path: str, the dataset id or path. e.g. 'custom_dialog'
        subset_list: list, the subset list to load. e.g. ['basic']
        work_dir: str, the local root data directory. e.g. '/path/to/data'
        kwargs: dict, other arguments.
        """
        data_dict = {}
        for subset_name in subset_list:
            data_dict[subset_name] = {}
            file_path = os.path.join(work_dir, dataset_name_or_path, subset_name, 'trans_task.json')
            if os.path.exists(file_path):
                with open(file_path, 'r', encoding='utf-8') as f:
                    logger.info(file_path)
                    rows = []
                    data = json.load(f)
                    for item in data:
                        logger.info(f'item: {item}')
                        new_item = {
                            'input': [
                                {'role': 'system', 'content': '把下面内容翻译成英文.'},
                                {'role': 'user', 'content': item['question']}
                            ],
                            'ideal': item['answer']
                        }
                        rows.append(new_item)
                    data_dict[subset_name]['validation'] = rows
            else:
                logger.error('Path does not exist: ' + file_path)

        return data_dict
    def gen_prompt(self, input_d: dict, few_shot_list: list, **kwargs) -> dict:
        """
        Generate the model prompt from the raw input, unifying the prompt format for the custom_dialog benchmark.

        Args:
            input_d (dict): The raw input, a single sample as produced by load_from_disk:
                {
                    'input': [
                        {'role': 'system', 'content': '把下面内容翻译成英文.'},
                        {'role': 'user', 'content': '<Chinese source text>'}
                    ],
                    'ideal': '<English reference translation>'
                }

        Returns:
            {'data': [full_prompt]}
        """
        # context: str = '请把下面的文字翻译成英文,尽可能的准确' + '\n\n ' + input_d['question'] + '\n'

        def get_sys_prompt(inp: dict) -> str:
            return inp['input'][0]['content']

        prompt = get_sys_prompt(input_d)
        few_shot_prompts = [self._generate_prompt(input_d=sample, include_answer=True) for sample in few_shot_list]
        context: str = '\n'.join(few_shot_prompts) + '\n'
        context += self._generate_prompt(input_d=input_d, include_answer=False)
        full_prompt = prompt + context

        return {'data': [full_prompt]}
    def get_gold_answer(self, input_d: dict) -> str:
        # Get the gold (reference) answer
        return input_d.get('ideal', '')
    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = 'checkpoint') -> str:
        """
        Parse the model output to get the answer. Could be the best choice index.

        Args:
            result: Predicted answer from the model. Usually a string for chat.
            raw_input_d (dict): The raw input. Depending on the dataset.
            eval_type: 'checkpoint', 'service' or 'custom', default: 'checkpoint'

        Returns:
            The parsed answer. Depending on the dataset. Usually a string for chat.
        """
        logger.info('result:' + result)
        if eval_type == 'checkpoint':
            return result
        elif eval_type == 'service':
            return ResponseParser.parse_first_option_with_choices(text=result,
                                                                  options=self.choices)  # TODO: to be checked !
        elif eval_type == 'custom':
            return ResponseParser.parse_first_option_with_choices(text=result,
                                                                  options=self.choices)  # TODO: to be checked !
        else:
            raise ValueError(f'Invalid eval_type: {eval_type}')
    def match(self, gold: str, pred: str) -> float:
        # return exact_match(gold=gold, pred=pred)
        # To report the full ROUGE/BLEU breakdown instead, delete the `return bleu(item)` line below and return `res`.
        item = [(gold, pred)]
        res = dict()
        rouge_dict = compute_rouge_score_one_sample_zh([pred], [gold])
        bleu_dict = bleu_ngram_one_sample(pred, gold)
        res.update(rouge_dict)
        res.update(bleu_dict)
        return bleu(item)
        # return res
    def compute_metric(self, review_res_list: list) -> float:
        """
        Compute the evaluation result with the specified metric.

        Args:
            review_res_list: review score list, e.g. [0, 1, 1, 0, ...]

        Returns:
            The metric score.
        """
        # Per-metric aggregation, for when `match` returns the merged ROUGE/BLEU dict `res`:
        # items = defaultdict(list)
        # for scores in review_res_list:
        #     for k, v in scores.items():
        #         items[k].append((v, 1.0))
        # # items = [(score, 1.0) for score in review_res_list]
        # res = {k: weighted_mean(v) for k, v in items.items()}
        # # return weighted_mean(items)
        # return res

        items = [(score, 1.0) for score in review_res_list]
        return weighted_mean(items)
    def gen_report(self, subset_score_map: dict, report_name: str = None) -> dict:
        """
        Args:
            subset_score_map: {subset_name: (score_dict, num), ...}
            report_name: str, the user-defined report name.

        Returns:
            {
                "name": "GeneralQA",
                "metric": "WeightedAverageBLEU",
                "score": 0.399,
                "category": [
                    {
                        "name": "DEFAULT",
                        "score": 0.399,
                        "subset": [
                            {
                                "name": "default",
                                "score": 0.399
                            },
                        ]
                    }
                ],
                "total_num": 10
            }
        """
        # Per-metric report generation (for the dict-valued `match`/`compute_metric` variant above):
        # total_num: int = sum([num for _, num in subset_score_map.values()])
        # # weighted_avg_bleu: float = sum([score * num for score, num in subset_score_map.values()]) / total_num
        # cate_avg_list = [{'name': subset_name, 'score': score_dict} for subset_name, (score_dict, _) in
        #                  subset_score_map.items()]
        # total_avg_list = defaultdict(float)
        # for score_dict, num in subset_score_map.values():
        #     for metric, score in score_dict.items():
        #         total_avg_list[metric] += score * num / total_num
        #
        # category_d = dict(name="DEFAULT",
        #                   score=total_avg_list,
        #                   subset=cate_avg_list)
        #
        # res_map = dict(name=report_name or "custom_dialog",
        #                metric=self.metric_list[0]['name'],
        #                score=total_avg_list,
        #                category=[category_d],
        #                total_num=total_num)
        #
        # return res_map

        total_num: int = sum([num for _, num in subset_score_map.values()])
        weighted_avg_acc: float = sum([score * num for score, num in subset_score_map.values()]) / total_num
        cate_avg_list = [{'name': subset_name, 'score': score} for subset_name, (score, _) in subset_score_map.items()]

        category_d = dict(name='DEFAULT',
                          score=weighted_avg_acc,
                          subset=cate_avg_list)

        res_map = dict(name=report_name or 'custom_dialog',
                       metric=self.metric_list[0]['name'],
                       score=weighted_avg_acc,
                       category=[category_d],
                       total_num=total_num)

        return res_map
    @classmethod
    def _generate_prompt(cls, input_d: dict, include_answer=False) -> str:
        example: str = f" {input_d['input'][1]['content']}\nAnswer:"

        if include_answer:
            # 'ideal' is a plain string here (see load_from_disk), so append it whole rather than its first character.
            example += f" {input_d['ideal']}\n\n"

        return example
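To sanity-check the adapter outside the full llmuses pipeline, the sketch below loads the local data, builds a 0-shot prompt, and scores a stand-in prediction. Method names and signatures come from the adapter above; the paths and the dummy model output are assumptions:

from llmuses.benchmarks.custom_dialog import CustomDialogAdapter

adapter = CustomDialogAdapter()

# Expects <work_dir>/custom_dialog/basic/trans_task.json (see the sample layout near the top).
data = adapter.load_from_disk('custom_dialog', ['basic'], '/root/eval-scope/data')
sample = data['basic']['validation'][0]

# Build the prompt that would be sent to the model (0-shot, so no few-shot examples).
prompt_d = adapter.gen_prompt(input_d=sample, few_shot_list=[])
print(prompt_d['data'][0])

# Score a stand-in prediction against the reference answer.
gold = adapter.get_gold_answer(sample)
pred = adapter.parse_pred_result('The weather is nice today.', raw_input_d=sample, eval_type='checkpoint')
score = adapter.match(gold=gold, pred=pred)
print(adapter.compute_metric([score]))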