accelerate+deepspeed多机多卡训练的两种方法 - 知乎
单节点训练:
# Move into the first step of the pipeline
cd training/step1_supervised_finetuning/
# Run the training script
bash training_scripts/single_gpu/run_1.3b.sh
# Evaluate the model
bash evaluation_scripts/run_prompt.sh
run_1.3b.sh脚本:
# DeepSpeed Team
# Output directory (arg 1) and DeepSpeed ZeRO stage (arg 2), with defaults.
# ${n:-default} substitutes the default when the positional is unset OR empty,
# matching the original explicit `if [ "$x" == "" ]` checks.
OUTPUT=${1:-./output}
ZERO_STAGE=${2:-2}
# Quote the expansion so paths containing spaces or glob characters work (SC2086).
mkdir -p "$OUTPUT"
deepspeed main.py \
--data_path Dahoas/rm-static Dahoas/full-hh-rlhf Dahoas/synthetic-instruct-gptj-pairwise yitingxie/rlhf-reward-datasets \
--data_split 2,4,4 \
--model_name_or_path facebook/opt-1.3b \
--