# --- Environment setup for OpenCompass 0.2.4 ---
conda activate opencompass
# Evaluation-time dependencies: metric backends (rouge_chinese, nltk, evaluate),
# retrieval helpers (rank_bm25, sentence_transformers) and model-loading extras.
pip install protobuf tabulate mmengine tqdm prettytable datasets transformers jieba scikit-learn evaluate rouge_chinese nltk rank_bm25 sentence_transformers tiktoken absl-py fuzzywuzzy sentencepiece python-Levenshtein einops accelerate

# Install OpenCompass from source, pinned to the 0.2.4 tag.
git clone -b 0.2.4 https://github.com/open-compass/opencompass
cd opencompass || exit 1
pip install -e .

# Unpack the core evaluation datasets into ./data.
# NOTE(review): the cp destination assumes the clone lives at
# /root/code/opencompass — confirm it matches the directory cd'd into above.
cp /share/temp/datasets/OpenCompassData-core-20231110.zip /root/code/opencompass/
unzip OpenCompassData-core-20231110.zip
# Flatten the C-Eval test split into the formal_ceval directory layout
# that the ceval_gen configs expect.
cp -r ./data/ceval/formal_ceval/test/* ./data/ceval/formal_ceval/
# 查看支持的数据集和模型：列出所有与 InternLM 及 C-Eval 相关的配置
# List every dataset/model config whose name matches "internlm" or "ceval".
python tools/list_configs.py internlm ceval

# NOTE(review): the earlier step clones into ./opencompass relative to the
# shell's cwd — confirm the checkout actually lives at /root/opencompass.
cd /root/opencompass/ || exit 1
# Work around the MKL vs. GNU OpenMP threading-layer conflict that
# numpy/torch can raise in this environment.
export MKL_SERVICE_FORCE_INTEL=1
# Evaluate internlm2-chat-1.8b on the C-Eval generation configs.
python run.py \
  --datasets ceval_gen \
  --hf-path /share/new_models/Shanghai_AI_Laboratory/internlm2-chat-1_8b \
  --tokenizer-path /share/new_models/Shanghai_AI_Laboratory/internlm2-chat-1_8b \
  --tokenizer-kwargs padding_side='left' truncation='left' trust_remote_code=True \
  --model-kwargs trust_remote_code=True device_map='auto' \
  --max-seq-len 1024 \
  --max-out-len 16 \
  --batch-size 2 \
  --hf-num-gpus 1 \
  --debug
# 启动命令各字段含义如下
# Annotated launch command. The flags are collected in a bash array so each
# one can carry an inline comment — in the plain backslash-continuation form,
# any text after the trailing '\' (or a '\#') breaks the continuation and the
# remaining flags would execute as separate, failing commands.
run_args=(
  --datasets ceval_gen                                                            # dataset config(s) to evaluate
  --hf-path /share/new_models/Shanghai_AI_Laboratory/internlm2-chat-1_8b          # HuggingFace model path
  --tokenizer-path /share/new_models/Shanghai_AI_Laboratory/internlm2-chat-1_8b   # HuggingFace tokenizer path (may be omitted when identical to the model path)
  --tokenizer-kwargs padding_side='left' truncation='left' trust_remote_code=True # kwargs used to build the tokenizer
  --model-kwargs device_map='auto' trust_remote_code=True                         # kwargs used to build the model
  --max-seq-len 1024                                                              # maximum input sequence length the model accepts
  --max-out-len 16                                                                # maximum generated tokens; set higher for multi-turn dialogue
  --batch-size 2                                                                  # batch size
  --hf-num-gpus 1                                                                 # number of GPUs required to run the model
  --work-dir '/root/temp/opencompass'                                             # directory for evaluation artifacts; defaults to outputs/default
  --reuse latest                                                                  # resume from a checkpoint; 'latest' or a timestamp dir under outputs/default
  --debug                                                                         # log to the console; omit to write logs under outputs/logs/
)
python run.py "${run_args[@]}"