Test data
CUDA_VISIBLE_DEVICES=1 python -m llmuses.run --model qwen/Qwen2-7B-Instruct --template-type qwen --datasets custom_dialog --dataset-hub Local --dataset-args '{"custom_dialog": {"local_path": "/root/eval-scope/data/custom_dialog","subset_list":["basic"]}}' --dataset-dir /root/eval-scope/data/
python llmuses/run.py --model qwen/Qwen2-7B-Instruct --template-type qwen --datasets arc --dataset-hub Local --dataset-args '{"arc": {"local_path": "/root/eval-scope/data/arc"}}' --dataset-dir /root/eval-scope/data/
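The value passed to --dataset-args must be a single valid JSON string. If the shell quoting becomes error-prone, a small helper like the sketch below (standard library only; the paths simply mirror the custom_dialog command above) prints the exact string to paste:

import json

# Per-benchmark options, mirroring the custom_dialog command above.
dataset_args = {
    'custom_dialog': {
        'local_path': '/root/eval-scope/data/custom_dialog',
        'subset_list': ['basic'],
    }
}

# Paste the printed JSON after --dataset-args (wrap it in single quotes in the shell).
print(json.dumps(dataset_args))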
custom_dialog
__init__.py
# Copyright (c) Alibaba, Inc. and its affiliates.
from llmuses.benchmarks.custom_dialog.custom_dialog_adapter import DATASET_ID, SUBSET_LIST, CustomDialogAdapter
from llmuses.benchmarks.custom_dialog.custom_dialog_adapter import CustomDialogAdapter as DataAdapterClass
from llmuses.models.model_adapter import ChatGenerationModelAdapter as ModelAdapterClass # noqa
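# Note (not in the original file): this mirrors the arc benchmark's __init__.py. The
# DataAdapterClass and ModelAdapterClass aliases appear to be what the framework looks up
# when the benchmark is selected with --datasets custom_dialog, so keep these names as-is.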
custom_dialog.py
# Copyright (c) Alibaba, Inc. and its affiliates.
# Copyright (c) Allen Institute, and its affiliates.
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
"""AI2 ARC (Abstraction and Reasoning Corpus) for General Artificial Intelligence Benchmark."""
"""AUTO GENERATED, DO NOT EDIT"""
import json
import os
import datasets
# flake8: noqa
_CITATION = """\
@article{allenai:arc,
author = {Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and
Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
title = {Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
journal = {arXiv:1803.05457v1},
year = {2018},
}
"""
_DESCRIPTION = """\
A new dataset of 7,787 genuine grade-school level, multiple-choice science questions, assembled to encourage research in
advanced question-answering. The dataset is partitioned into a Challenge Set and an Easy Set, where the former contains
only questions answered incorrectly by both a retrieval-based algorithm and a word co-occurrence algorithm. We are also
including a corpus of over 14 million science sentences relevant to the task,
and an implementation of three neural baseline models for this dataset. We pose ARC as a challenge to the community.
ARC-Easy:
    train: 2251
    test: 2376
    validation: 570
ARC-Challenge:
    train: 1119
    test: 1172
    validation: 299
"""
_URL = 'https://modelscope.oss-cn-beijing.aliyuncs.com/open_data/arc/ARC-V1-Feb2018.zip'
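# Assumption (not in the original file): with --dataset-hub Local and a local_path in
# --dataset-args (as in the commands above), the data is read from disk, so this download
# URL is kept only as a reference to the original ARC archive.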
# tasks: ['ARC-Easy', 'ARC-Challenge']
class CustomDialogConfig(datasets.BuilderConfig):
    """BuilderConfig for CustomDialog."""

    def __init__(self, **kwargs):
        """BuilderConfig for CustomDialog.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(CustomDialogConfig, self).__init__(version=datasets.Version('1.0.0', ''), **kwargs)
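# Hypothetical sketch (not in the original file): in the datasets builder pattern, configs
# are normally registered on the builder class so that the 'basic' subset named in
# --dataset-args can be selected, e.g.
#   BUILDER_CONFIGS = [CustomDialogConfig(name='basic', description='basic dialog subset')]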
class CustomDialog(datasets.GeneratorBasedBuilder):
    """
    The AI2 Reasoning Challenge (ARC) dataset.
Subset: ARC-Easy, AR