# SFT LoRA fine-tune of the int4-quantized ChatGLM2-6B on the self-cognition data.
# Fixed: typographic en-dashes (–) replaced with real `--` option prefixes, and
# backslash line-continuations added so this is one runnable command.
# NOTE(review): dataset is passed as a raw .json path here, while later runs use a
# registered dataset name — confirm which form train_bash.py expects.
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
  --stage sft \
  --model_name_or_path /home/rkwork/work_place/project/ChatGLM2-6B/models/chatglm2-6b-int4 \
  --do_train \
  --dataset /home/rkwork/work_place/project/ChatGLM2-6B/ChatGLM-Efficient-Tuning/data/self_cognition.json \
  --finetuning_type lora \
  --output_dir path_to_sft_checkpoint \
  --per_device_train_batch_size 4 \
  --gradient_accumulation_steps 4 \
  --lr_scheduler_type cosine \
  --logging_steps 10 \
  --save_steps 1000 \
  --learning_rate 5e-5 \
  --num_train_epochs 3.0 \
  --plot_loss \
  --fp16
# SFT LoRA fine-tune (ChatGLM2 via --use_v2) on the registered `self_cognition`
# dataset: 10 epochs, lr 1e-3, LoRA rank 32; checkpoints written to ./cognition.
# --overwrite_cache forces the dataset preprocessing cache to be rebuilt.
CUDA_VISIBLE_DEVICES=0 python src/train_sft.py \
--do_train \
--use_v2 \
--dataset self_cognition \
--finetuning_type lora \
--lora_rank 32 \
--output_dir cognition \
--overwrite_cache \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 2 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--warmup_steps 0 \
--learning_rate 1e-3 \
--num_train_epochs 10.0 \
--fp16
# Interactive CLI chat with the base ChatGLM2-6B plus the LoRA weights from
# checkpoint-9900. Fixed: en-dashes replaced with `--` and continuations added.
python src/cli_demo.py \
  --model_name_or_path /home/rkwork/work_place/project/ChatGLM2-6B/models/chatglm2-6b \
  --finetuning_type lora \
  --checkpoint_dir /home/rkwork/work_place/project/ChatGLM2-6B/ChatGLM-Efficient-Tuning/cognition/checkpoint-9900
# SFT LoRA fine-tune of the full-precision ChatGLM2-6B on `self_cognition`:
# 3 epochs, lr 5e-5, fp16, loss curve plotted; output in path_to_sft_checkpoint.
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path /home/rkwork/work_place/project/ChatGLM2-6B/models/chatglm2-6b \
--do_train \
--dataset self_cognition \
--finetuning_type lora \
--output_dir path_to_sft_checkpoint \
--per_device_train_batch_size 4 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16
# 训练 10000 step 失败（fail）！
# Retry of the self-cognition SFT run: 500 epochs at lr 1e-3, saving every 100
# steps into ./cognition. Fixed: en-dashes replaced with `--` and backslash
# line-continuations added so this is one runnable command.
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
  --stage sft \
  --model_name_or_path /home/rkwork/work_place/project/ChatGLM2-6B/models/chatglm2-6b \
  --do_train \
  --dataset self_cognition \
  --finetuning_type lora \
  --output_dir cognition \
  --overwrite_cache \
  --per_device_train_batch_size 2 \
  --gradient_accumulation_steps 2 \
  --lr_scheduler_type cosine \
  --logging_steps 10 \
  --save_steps 100 \
  --warmup_steps 0 \
  --learning_rate 1e-3 \
  --num_train_epochs 500.0 \
  --fp16
# 测试：
# Test the fine-tuned model in the CLI using checkpoint-9900.
# Fixed: en-dashes replaced with `--` and continuations added.
python src/cli_demo.py \
  --model_name_or_path /home/rkwork/work_place/project/ChatGLM2-6B/models/chatglm2-6b \
  --finetuning_type lora \
  --checkpoint_dir /home/rkwork/work_place/project/ChatGLM2-6B/ChatGLM-Efficient-Tuning/cognition/checkpoint-9900
# 训练 200 step 成功！
# The run that worked: self-cognition SFT for 10 epochs at lr 1e-3, checkpoints
# every 1000 steps into ./cognition. Fixed: en-dashes replaced with `--` and
# backslash line-continuations added so this is one runnable command.
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
  --stage sft \
  --do_train \
  --model_name_or_path /home/rkwork/work_place/project/ChatGLM2-6B/models/chatglm2-6b \
  --dataset self_cognition \
  --finetuning_type lora \
  --output_dir cognition \
  --overwrite_cache \
  --per_device_train_batch_size 2 \
  --gradient_accumulation_steps 2 \
  --lr_scheduler_type cosine \
  --logging_steps 10 \
  --save_steps 1000 \
  --warmup_steps 0 \
  --learning_rate 1e-3 \
  --num_train_epochs 10.0 \
  --fp16
# 测试：
# CLI test against the latest checkpoint in ./cognition (directory, not a
# specific checkpoint-N). Fixed: en-dashes replaced with `--`, continuations added.
python src/cli_demo.py \
  --model_name_or_path /home/rkwork/work_place/project/ChatGLM2-6B/models/chatglm2-6b \
  --finetuning_type lora \
  --checkpoint_dir /home/rkwork/work_place/project/ChatGLM2-6B/ChatGLM-Efficient-Tuning/cognition
# Web (Gradio-style) demo with the same base model + ./cognition LoRA weights.
# Fixed: en-dashes replaced with `--` and continuations added.
python src/web_demo.py \
  --model_name_or_path /home/rkwork/work_place/project/ChatGLM2-6B/models/chatglm2-6b \
  --finetuning_type lora \
  --checkpoint_dir /home/rkwork/work_place/project/ChatGLM2-6B/ChatGLM-Efficient-Tuning/cognition
# 导出：
# Merge the LoRA weights in ./cognition into the base model and export to ./muziAI.
# Fixed: the --checkpoint_dir line had no space before its trailing backslash
# (`…cognition\`), which made backslash-newline fuse the path with the next
# line's text and corrupt the argument list.
python src/export_model.py \
  --model_name_or_path /home/rkwork/work_place/project/ChatGLM2-6B/models/chatglm2-6b \
  --finetuning_type lora \
  --checkpoint_dir /home/rkwork/work_place/project/ChatGLM2-6B/ChatGLM-Efficient-Tuning/cognition \
  --output_dir muziAI
# aifeng_start
# 训练 200 step 成功！
# SFT run on the `self_cognition_cp` dataset, 10 epochs at lr 1e-3; checkpoints
# go to my_data/output_checkpoint. Fixed: en-dashes replaced with `--` and
# backslash line-continuations added so this is one runnable command.
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
  --stage sft \
  --do_train \
  --model_name_or_path /home/rkwork/work_place/project/ChatGLM2-6B/models/chatglm2-6b \
  --dataset self_cognition_cp \
  --finetuning_type lora \
  --output_dir /home/rkwork/work_place/project/ChatGLM2-6B/ChatGLM-Efficient-Tuning/my_data/output_checkpoint \
  --overwrite_cache \
  --per_device_train_batch_size 2 \
  --gradient_accumulation_steps 2 \
  --lr_scheduler_type cosine \
  --logging_steps 10 \
  --save_steps 1000 \
  --warmup_steps 0 \
  --learning_rate 1e-3 \
  --num_train_epochs 10.0 \
  --fp16
# CLI test against the my_data/output_checkpoint LoRA weights.
# Fixed: en-dashes replaced with `--` and continuations added.
python src/cli_demo.py \
  --model_name_or_path /home/rkwork/work_place/project/ChatGLM2-6B/models/chatglm2-6b \
  --finetuning_type lora \
  --checkpoint_dir /home/rkwork/work_place/project/ChatGLM2-6B/ChatGLM-Efficient-Tuning/my_data/output_checkpoint