Fine-Tuning a GPT-2 Model with Hugging Face Transformers

Before we start coding, a few files need to be prepared:

First, press Win+R to open the Run dialog, type PowerShell, and press Enter.

Then run:

pip install -U huggingface_hub
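
The training script later in this post also imports torch and transformers. If they aren't already in your environment, install them as well (a baseline assumption on my part; the original steps only install huggingface_hub):

pip install torch transformers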

Once the install finishes, point the Hub client at the hf-mirror.com mirror by setting an environment variable:

$env:HF_ENDPOINT = "https://hf-mirror.com"
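
Note that $env:HF_ENDPOINT only applies to the current PowerShell session. If you want the mirror to stick across sessions, one option (my suggestion, not part of the original steps) is Windows' setx:

setx HF_ENDPOINT "https://hf-mirror.com"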

Then download the model:

huggingface-cli download --resume-download gpt2 --local-dir "D:\Pythonxiangmu\PythonandAI\Transformer Models\gpt-2"

(Screenshot: project directory path.)
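
Before moving on, it's worth a quick sanity check that the snapshot actually loads from disk. A minimal sketch, assuming the download path from the command above (no network access needed):

from transformers import AutoModelForCausalLM, AutoTokenizer

local_model_path = r"D:\Pythonxiangmu\PythonandAI\Transformer Models\gpt-2"
tokenizer = AutoTokenizer.from_pretrained(local_model_path)
model = AutoModelForCausalLM.from_pretrained(local_model_path)
# gpt2 has ~124M parameters; a successful print means the files are intact
print(model.config.model_type, model.num_parameters())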

Then download the dataset (note: give it its own wikitext directory rather than reusing the gpt-2 folder, so the dataset files don't mix into the model snapshot):

huggingface-cli download --repo-type dataset --resume-download wikitext --local-dir "D:\Pythonxiangmu\PythonandAI\Transformer Models\wikitext"
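
The script below actually trains on a small hardcoded list of sentences, so the wikitext download is optional. If you want to read it into Python, here is a minimal sketch using the datasets library (an extra dependency; the parquet layout and file pattern are my assumptions about the snapshot, so adjust them to what you actually see on disk):

from datasets import load_dataset

# Hypothetical local layout; check the downloaded folder for the real file names
data_dir = r"D:\Pythonxiangmu\PythonandAI\Transformer Models\wikitext"
ds = load_dataset(
    "parquet",
    data_files={"train": f"{data_dir}/wikitext-2-raw-v1/train-*.parquet"},
)
train_texts = [t for t in ds["train"]["text"] if t.strip()]  # drop blank lines
print(len(train_texts), train_texts[0][:80])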

Once all of these downloads have finished in PowerShell, we can start on the code.

import torch
from torch.utils.data import Dataset, DataLoader
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    get_linear_schedule_with_warmup,
    set_seed,
)
from torch.optim import AdamW  # transformers.AdamW is deprecated; use PyTorch's

# Set a random seed so results are reproducible
set_seed(42)


class TextDataset(Dataset):
    def __init__(self, tokenizer, texts, block_size=128):
        self.tokenizer = tokenizer
        # Tokenize every text up front, padded/truncated to a fixed block size.
        # squeeze(0) removes the batch dimension the tokenizer adds, so the
        # DataLoader can stack examples into (batch_size, block_size) tensors.
        self.examples = [
            {key: value.squeeze(0) for key, value in
             self.tokenizer(text, return_tensors="pt", padding='max_length',
                            truncation=True, max_length=block_size).items()}
            for text in texts
        ]
        # After initializing the tokenizer, confirm that unk_token is set
        print(f"Tokenizer's unk_token: {self.tokenizer.unk_token}, unk_token_id: {self.tokenizer.unk_token_id}")

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        item = self.examples[i]
        # Defensively replace any token id outside the vocabulary with unk_token_id
        item['input_ids'] = torch.where(item['input_ids'] >= self.tokenizer.vocab_size,
                                        self.tokenizer.unk_token_id, item['input_ids'])
        return item


def train(model, dataloader, optimizer, scheduler, device, tokenizer):
    model.train()
    for batch in dataloader:
        input_ids = batch['input_ids'].to(device)
        # Log a warning if any token id falls outside the model's vocabulary
        if torch.any(input_ids >= model.config.vocab_size):
            print("Warning: Some input IDs are outside the model's vocabulary.")
            print(f"Max input ID: {input_ids.max()}, Vocabulary Size: {model.config.vocab_size}")

        attention_mask = batch['attention_mask'].to(device)
        # For causal LM fine-tuning the labels are the inputs themselves;
        # pad positions are set to -100 so the loss function ignores them
        labels = input_ids.clone()
        labels[labels == tokenizer.pad_token_id] = -100

        outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        loss = outputs.loss
        loss.backward()

        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()


def main():
    local_model_path = "D:/Pythonxiangmu/PythonandAI/Transformer Models/gpt-2"
    tokenizer = AutoTokenizer.from_pretrained(local_model_path)

    # GPT-2 ships *without* a pad_token. Reusing eos_token for padding keeps the
    # vocabulary unchanged, so the model's embedding matrix needs no resizing.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(local_model_path, pad_token_id=tokenizer.pad_token_id)

    model.to(device)

    train_texts = [
        "The quick brown fox jumps over the lazy dog.",
        "In the midst of chaos, there is also opportunity.",
        "To be or not to be, that is the question.",
        "Artificial intelligence will reshape our future.",
        "Every day is a new opportunity to learn something.",
        "Python programming enhances problem-solving skills.",
        "The night sky sparkles with countless stars.",
        "Music is the universal language of mankind.",
        "Exploring the depths of the ocean reveals hidden wonders.",
        "A healthy mind resides in a healthy body.",
        "Sustainability is key for our planet's survival.",
        "Laughter is the shortest distance between two people.",
        "Virtual reality opens doors to immersive experiences.",
        "The early morning sun brings hope and vitality.",
        "Books are portals to different worlds and minds.",
        "Innovation distinguishes between a leader and a follower.",
        "Nature's beauty can be found in the simplest things.",
        "Continuous learning fuels personal growth.",
        "The internet connects the world like never before."
        # more training texts...
    ]

    dataset = TextDataset(tokenizer, train_texts, block_size=128)
    dataloader = DataLoader(dataset, batch_size=4, shuffle=True)

    optimizer = AdamW(model.parameters(), lr=5e-5)
    total_steps = len(dataloader) * 5  # assuming 5 training epochs
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)

    for epoch in range(5):  # train for 5 epochs
        train(model, dataloader, optimizer, scheduler, device, tokenizer)

    # Save the fine-tuned model and tokenizer
    model.save_pretrained("path/to/save/fine-tuned_model")
    tokenizer.save_pretrained("path/to/save/fine-tuned_tokenizer")


if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    main()
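
Once training finishes, you can load the saved weights back and generate text with them. A minimal sketch; the paths below are the same placeholders passed to save_pretrained above, so substitute whatever you actually used:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/save/fine-tuned_tokenizer")
model = AutoModelForCausalLM.from_pretrained("path/to/save/fine-tuned_model")
model.eval()

prompt = "Artificial intelligence"
inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        max_new_tokens=40,
        do_sample=True,            # sample instead of greedy decoding
        top_p=0.9,                 # nucleus sampling
        pad_token_id=tokenizer.pad_token_id,
    )
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))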
