Notes on the DeepSpeed launch scripts and configs used to fine-tune large language models, taken from the Llama2-Chinese repo.
Training script
Launch script
# pretrain.sh
output_model=/mnt/data/output
# Create the output directory and keep a copy of the launch script and DeepSpeed configs with the run
if [ ! -d "${output_model}" ]; then
    mkdir -p "${output_model}"
fi
cp ./pretrain.sh "${output_model}"
cp ./ds_config_zero*.json "${output_model}"
deepspeed --num_gpus 8 pretrain_clm.py \
--model_name_or_path meta-llama/Llama-2-7b-chat-hf \
--train_files ../../data/train_sft.csv \
--validation_files ../../data/dev_sft.csv \
../../data/dev_sft_sharegpt.csv \
--per_device_train_batch_size 10 \
--per_device_eval_batch_size 10 \
--do_train \
--output_dir ${output_model} \
--evaluation_strategy steps \
--use_fast_tokenizer false \
--max_eval_samples 500 \
--learning_rate 3e-5 \
--gradient_accumulation_steps 4 \
--num_train_epochs 3 \
--warmup_steps 10000 \
--logging_dir ${output_model}/logs \
--logging_strategy steps \
--logging_steps 2 \
--save_strategy steps \
--preprocessing_num_workers 10 \
--save_steps 500 \
--eval_steps 500 \
--save_total_limit 2000 \
--seed 42 \
--disable_tqdm false \
--ddp_find_unused_parameters false \
--block_size 4096 \
--overwrite_output_dir \
--report_to tensorboard \
--run_name ${output_model} \
--bf16 \
--bf16_full_eval \
--gradient_checkpointing \
--deepspeed ./ds_config_zero3.json \
--ignore_data_skip true \
--ddp_timeout 18000000 \
| tee -a ${output_model}/train.log
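With 8 GPUs, a per-device batch of 10, and 4 gradient-accumulation steps, each optimizer step sees 320 sequences of up to block_size (4096) tokens. A quick sanity check of that arithmetic (a standalone snippet, not part of the repo):

# batch_size_check.py -- hypothetical helper, only to verify the effective batch size
num_gpus = 8                      # deepspeed --num_gpus 8
per_device_train_batch_size = 10  # --per_device_train_batch_size 10
gradient_accumulation_steps = 4   # --gradient_accumulation_steps 4

global_batch_size = num_gpus * per_device_train_batch_size * gradient_accumulation_steps
print(global_batch_size)  # 320 sequences of up to 4096 tokens per optimizer step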
DeepSpeed config files
# ds_config_zero2.json
{
"fp16": {
"enabled": "auto",
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": "auto",
"betas": "auto",
"eps": "auto",
"weight_decay": "auto"
}
},
"scheduler": {
"type": "WarmupDecayLR",
"params": {
"last_batch_iteration": -1,
"total_num_steps": "auto",
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto"
}
},
"zero_optimization": {
"stage": 2,
"offload_optimizer": {
"device": "cpu",
"pin_memory": true
},
"offload_param": {
"device": "cpu",
"pin_memory": true
},
"allgather_partitions": true,
"allgather_bucket_size": 5e8,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": 5e8,
"contiguous_gradients": true
},
"activation_checkpointing": {
"partition_activations": false,
"cpu_checkpointing": false,
"contiguous_memory_optimization": false,
"number_checkpoints": null,
"synchronize_checkpoint_boundary": false,
"profile": false
},
"gradient_accumulation_steps": "auto",
"gradient_clipping": "auto",
"steps_per_print": 2000,
"train_batch_size": "auto",
"min_lr": 5e-7,
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
}
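Every "auto" value in these configs is filled in by the Hugging Face Trainer, not by DeepSpeed itself: when the JSON is passed via --deepspeed, the Trainer substitutes the corresponding command-line arguments (learning rate, Adam betas/eps, weight decay, per-device batch size, gradient accumulation, warmup steps, and the derived global batch size). A rough illustration of that substitution, assuming the flags from pretrain.sh; transformers does this internally, so the snippet is only for intuition:

# resolve_auto.py -- illustrative only; not the actual transformers integration code
import json

with open("ds_config_zero2.json") as f:
    ds_config = json.load(f)

# values taken from the pretrain.sh command line
learning_rate = 3e-5
per_device_train_batch_size = 10
gradient_accumulation_steps = 4
world_size = 8

ds_config["optimizer"]["params"]["lr"] = learning_rate
ds_config["train_micro_batch_size_per_gpu"] = per_device_train_batch_size
ds_config["gradient_accumulation_steps"] = gradient_accumulation_steps
ds_config["train_batch_size"] = (per_device_train_batch_size
                                 * gradient_accumulation_steps
                                 * world_size)
print(json.dumps(ds_config, indent=2))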
# ds_config_zero3.json
{
"fp16": {
"enabled": "auto",
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1,
"fp16_opt_level": "O2"
},
"bf16": {
"enabled": "auto"
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": "auto",
"betas": "auto",
"eps": "auto",
"weight_decay": "auto"
}
},
"scheduler": {
"type": "WarmupDecayLR",
"params": {
"last_batch_iteration": -1,
"total_num_steps": "auto",
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto"
}
},
"zero_optimization": {
"stage": 3,
"overlap_comm": true,
"contiguous_gradients": true,
"sub_group_size": 1e9,
"reduce_bucket_size": "auto",
"stage3_prefetch_bucket_size": "auto",
"stage3_param_persistence_threshold": "auto",
"stage3_max_live_parameters": 1e9,
"stage3_max_reuse_distance": 1e9,
"gather_16bit_weights_on_model_save": true
},
"gradient_accumulation_steps": "auto",
"gradient_clipping": "auto",
"steps_per_print": 2000,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
}
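Under ZeRO-3 the parameters themselves are sharded across ranks, so checkpoints are written as partitioned shards; gather_16bit_weights_on_model_save: true additionally asks DeepSpeed to assemble a consolidated 16-bit model file when the Trainer saves. DeepSpeed typically also drops a zero_to_fp32.py helper into the checkpoint directory; if only the sharded checkpoint exists, the full fp32 weights can be reconstructed offline along these lines (a sketch, the checkpoint path is an assumption):

# consolidate_zero3.py -- sketch; adjust the paths to your own checkpoint directory
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

checkpoint_dir = "/mnt/data/output/checkpoint-500"   # hypothetical checkpoint produced by pretrain.sh
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)  # merges the ZeRO shards on CPU
torch.save(state_dict, "/mnt/data/output/pytorch_model_fp32.bin")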
Fine-tuning script
Launch script
# finetune.sh
output_model=save_folder
# Create the output directory and keep a copy of the launch script with the run
if [ ! -d "${output_model}" ]; then
    mkdir -p "${output_model}"
fi
cp ./finetune.sh "${output_model}"
deepspeed --include localhost:0,1 finetune_clm.py \
--model_name_or_path meta-llama/Llama-2-7b-chat-hf \
--train_files ../../data/train_sft.csv \
--validation_files ../../data/dev_sft.csv \
../../data/dev_sft_sharegpt.csv \
--per_device_train_batch_size 1 \
--per_device_eval_batch_size 1 \
--do_train \
--do_eval \
--use_fast_tokenizer false \
--output_dir ${output_model} \
--evaluation_strategy steps \
--max_eval_samples 800 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 8 \
--num_train_epochs 10 \
--warmup_steps 400 \
--logging_dir ${output_model}/logs \
--logging_strategy steps \
--logging_steps 10 \
--save_strategy steps \
--preprocessing_num_workers 10 \
--save_steps 20 \
--eval_steps 20 \
--save_total_limit 2000 \
--seed 42 \
--disable_tqdm false \
--ddp_find_unused_parameters false \
--block_size 2048 \
--report_to tensorboard \
--overwrite_output_dir \
--deepspeed ds_config_zero2.json \
--ignore_data_skip true \
--bf16 \
--gradient_checkpointing \
--bf16_full_eval \
--ddp_timeout 18000000 \
| tee -a ${output_model}/train.log
DeepSpeed config file: finetune.sh passes --deepspeed ds_config_zero2.json, the same ZeRO-2 config listed above.
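Once a fine-tuning run finishes, the checkpoint-* directories under save_folder are regular Hugging Face checkpoints and can be loaded with plain transformers for a quick smoke test. A minimal sketch; the checkpoint path and prompt below are assumptions, and the prompt should follow whatever format train_sft.csv uses:

# quick_infer.py -- minimal smoke test; checkpoint path and prompt are assumptions
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "save_folder/checkpoint-20"   # hypothetical: pick the checkpoint you want to test
tokenizer = AutoTokenizer.from_pretrained(ckpt, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16, device_map="auto")

prompt = "Human: Introduce the city of Beijing.\nAssistant: "  # match the format of your SFT data
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.3)
print(tokenizer.decode(out[0], skip_special_tokens=True))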