QLoRA Fine-Tuning of Qwen (by 云朵喵喵)

'''
Data-generation prompt:
Convert the article above into question-and-answer form. Describe it in JSON,
generating 100 entries inside the JSON. The JSON format is as follows:
[
  {
    "instruction": "Who are you?",
    "output": "My name is xxx"
  },
  {
    "instruction": "Can you still live with only one heart left?",
    "output": "Yes, humans only have one heart to begin with."
  }
]

Please continue generating 100 entries like this.

'''
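# A quick way to sanity-check the generated file (a sketch, not from the
# original post; assumes the 100 entries were saved as data.json in the
# format described above):
# import json
# with open("data.json", encoding="utf-8") as f:
#     _data = json.load(f)
# assert all({"instruction", "output"} <= item.keys() for item in _data)
# print(len(_data))  # expect 100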



import os
import warnings
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # 要使用的GPU的索引
os.environ['TRANSFORMERS_OFFLINE'] = '1'  # 离线的方式加载模型
# os.environ['DWAN_DISABLED'] = 'true'

warnings.filterwarnings("ignore")  # 忽略警告


import torch

from modelscope import (AutoModelForCausalLM,
                        BitsAndBytesConfig)

# 4-bit quantization (32-bit --> 4-bit)
_bnb_config = BitsAndBytesConfig(load_in_4bit=True,  # load the weights in 4-bit precision
                                 bnb_4bit_use_double_quant=True,  # double quantization: the quantization constants are themselves quantized, saving extra memory
                                 bnb_4bit_quant_type="nf4",  # NF4 = 4-bit NormalFloat, a quantization data type suited to normally distributed weights
                                 bnb_4bit_compute_dtype=torch.float32)  # computation is still done in 32-bit float, which helps preserve model quality
# Load the Qwen (Tongyi Qianwen) model
_model = AutoModelForCausalLM.from_pretrained(r"D:\vs_code\transformers-code\models\qwen\Qwen2-0.5B",
                                              low_cpu_mem_usage=True,
                                              quantization_config=_bnb_config)



from modelscope import AutoTokenizer

_tokenizer = AutoTokenizer.from_pretrained(r"D:\vs_code\transformers-code\models\qwen\Qwen2-0.5B")
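# Quick check (an added sketch): the preprocessing below appends _tokenizer.eos_token
# to every answer, so make sure the tokenizer actually defines one.
print(_tokenizer.eos_token, _tokenizer.eos_token_id)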



# from transformers import pipeline
# # Sanity-check the raw base model before fine-tuning
# pipe = pipeline("text-generation",
#                 model=_model,
#                 tokenizer=_tokenizer)
# print(pipe("User: 你好 Tom。Assistant:"))  # "Hello, Tom."





from datasets import load_dataset

_dataset = load_dataset("json", data_files=r"D:\vs_code\transformers-code\QLora\data.json", split="train")
print(_dataset)

# What preprocess_dataset (below) produces for each example:
#   input_ids      : token ids of "User: {instruction}Assistant: {output}<eos>"
#   attention_mask : 1 for real tokens, 0 for padding added later by the collator, e.g. [1,1,1,0,0,0]
#   labels         : -100 for the prompt tokens (ignored by the loss), then the answer token ids, e.g. [-100,-100,...,1100,15263,...]

'''
Example entry from data.json:
{
    "instruction": "How should diabetics choose snacks?",
    "output": "Diabetics should choose healthy snacks that are low in sugar, low in fat and high in fiber, such as fresh fruit and nuts, and avoid sugary drinks and processed foods."
}
Each entry is flattened into: User: {instruction}Assistant: {output}
'''
def preprocess_dataset(example):
    MAX_LENGTH = 256
    # Tokenize the prompt and the answer separately so we know where the prompt ends
    _instruction = _tokenizer(f"User: {example['instruction']}Assistant: ", add_special_tokens=False)
    _response = _tokenizer(example["output"] + _tokenizer.eos_token, add_special_tokens=False)
    _input_ids = _instruction["input_ids"] + _response["input_ids"]
    _attention_mask = _instruction["attention_mask"] + _response["attention_mask"]
    # Mask the prompt tokens with -100 so the loss is computed only on the answer
    _labels = [-100] * len(_instruction["input_ids"]) + _response["input_ids"]
    # Truncate everything to the same maximum length
    if len(_input_ids) > MAX_LENGTH:
        _input_ids = _input_ids[:MAX_LENGTH]
        _attention_mask = _attention_mask[:MAX_LENGTH]
        _labels = _labels[:MAX_LENGTH]
    return {
        "input_ids": _input_ids,
        "attention_mask": _attention_mask,
        "labels": _labels
    }


# Tokenize every example and drop the raw text columns
_dataset = _dataset.map(preprocess_dataset, remove_columns=_dataset.column_names)
_dataset = _dataset.shuffle()
print(_dataset)
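# Verification sketch (not in the original post): decode one preprocessed sample
# and count the masked positions to confirm the prompt is excluded from the loss.
_sample = _dataset[0]
print(_tokenizer.decode(_sample["input_ids"]))
print("masked prompt tokens:", sum(1 for t in _sample["labels"] if t == -100))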



from peft import (LoraConfig,
                  get_peft_model,
                  TaskType)

# config = LoraConfig(task_type=TaskType.CAUSAL_LM, target_modules=['q_proj'])  # adapt only the query projections
# LoRA fine-tuning
config = LoraConfig(task_type=TaskType.CAUSAL_LM,
                    r=8,
                    target_modules="all-linear")  # attach LoRA adapters to every linear layer; the base weights stay frozen
# Wrap the base model with the LoRA adapters so only the adapters are trained
_model = get_peft_model(_model, config)
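# print_trainable_parameters() is part of the PEFT API; it should report that only
# a small fraction of the parameters (the LoRA adapters) will actually be trained.
_model.print_trainable_parameters()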



# from transformers import (TrainingArguments,
#                           Trainer,
#                           DataCollatorForSeq2Seq)

# _training_args = TrainingArguments(output_dir="checkpoints/qlora",  # where checkpoints are written
#                                    overwrite_output_dir=True,
#                                    run_name="qlora_study",
#                                    per_device_train_batch_size=2,
#                                    gradient_accumulation_steps=8,  # gradient accumulation: effective batch size = 2 * 8 = 16
#                                    num_train_epochs=200,
#                                    save_steps=50,
#                                    logging_steps=50,
#                                    report_to="none",
#                                    optim="paged_adamw_32bit")  # paged 32-bit AdamW, which helps avoid GPU memory spikes

# trainer = Trainer(
#     model=_model,
#     args=_training_args,
#     train_dataset=_dataset,
#     data_collator=DataCollatorForSeq2Seq(tokenizer=_tokenizer, padding=True),
# )

# trainer.train()



# from transformers import pipeline
# from peft import PeftModel

# peft_model = PeftModel.from_pretrained(model=_model,
#                                        model_id=r"D:\vs_code\transformers-code\checkpoints\qlora\checkpoint-50")

# pipe = pipeline("text-generation", 
#                 model=peft_model,
#                 tokenizer=_tokenizer)
# print(pipe(f"User: 糖尿病患者如何应对节日聚餐?Assistant:", do_sample=True))



# from transformers import pipeline
# from peft import PeftModel

# peft_model = PeftModel.from_pretrained(model=_model,
#                                        model_id=r"D:\vs_code\transformers-code\checkpoints\qlora\checkpoint-50")

# # Merge the LoRA adapters back into the base weights and save a standalone model
# peft_model = peft_model.merge_and_unload()
# peft_model.save_pretrained("myqwen2-0.5b")
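# # It is also worth saving the tokenizer next to the merged weights (an addition,
# # not in the original post), so the directory can be loaded on its own later:
# _tokenizer.save_pretrained("myqwen2-0.5b")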



from transformers import pipeline
import torch
from modelscope import (AutoModelForCausalLM,
                        BitsAndBytesConfig)
from modelscope import AutoTokenizer

# The tokenizer was not fine-tuned, so load it from the base model directory
# (the checkpoint directory does not necessarily contain tokenizer files)
_tokenizer = AutoTokenizer.from_pretrained(r"D:\vs_code\transformers-code\models\qwen\Qwen2-0.5B")

# Same 4-bit quantization config as during training
_bnb_config = BitsAndBytesConfig(load_in_4bit=True,
                                 bnb_4bit_use_double_quant=True,
                                 bnb_4bit_quant_type="nf4",
                                 bnb_4bit_compute_dtype=torch.float32)
# Load the merged, fine-tuned model
_model = AutoModelForCausalLM.from_pretrained(r"D:\vs_code\transformers-code\myqwen2-0.5b",
                                              low_cpu_mem_usage=True,
                                              quantization_config=_bnb_config)
pipe = pipeline("text-generation", 
                model=_model,
                tokenizer=_tokenizer)
pipe(f"User: 糖尿病患者如何应对节日聚餐?Assistant:", do_sample=True)
