【HuggingFace Transformers】Using the Llama Model

1. Introduction to the LlamaForCausalLM class

The Llama model uses the decoder part of the Transformer architecture and is an autoregressive language model, well suited to tasks such as natural language understanding, generation, dialogue, and other text-generation-related work. LlamaForCausalLM is a class dedicated to causal language modeling, built on the LLaMA model architecture, and is mainly used for natural language generation tasks such as text generation, dialogue systems, and code generation.

Official example:

>>> from transformers import AutoTokenizer, LlamaForCausalLM

>>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")

>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."

2. Source code walkthrough of the LlamaForCausalLM class

Source file: transformers/src/transformers/models/llama/modeling_llama.py

# -*- coding: utf-8 -*-
# @time: 2024/9/3 17:09
import torch
import torch.nn.functional as F

from typing import Optional, Union, List, Tuple
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import LlamaPreTrainedModel, LlamaModel, Cache, StaticCache
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.models.llama.modeling_llama import (
    LLAMA_INPUTS_DOCSTRING,
    _CONFIG_FOR_DOC,
    _prepare_4d_causal_attention_mask_with_cache_position,  # used in prepare_inputs_for_generation below
)
from transformers.utils import add_start_docstrings_to_model_forward, replace_return_docstrings, is_torchdynamo_compiling
from transformers.utils import logging

logger = logging.get_logger(__name__)


class LlamaForCausalLM(LlamaPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = LlamaModel(config)  # the underlying Llama decoder stack
        self.vocab_size = config.vocab_size  # vocabulary size
        # Linear layer that projects the hidden states to a prediction distribution over the vocabulary
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        num_logits_to_keep: int = 0,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in
                `[0, ..., config.vocab_size]` or -100 (see the `input_ids` docstring). Tokens with indices
                set to `-100` are ignored (masked); the loss is only computed for the tokens with labels in
                `[0, ..., config.vocab_size]`.

            num_logits_to_keep (`int`, *optional*):
                Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
                `input_ids` (special case). Only the last token's logits are needed for generation, and
                computing them only for that token can save memory, which becomes quite significant for
                long sequences or a large vocabulary size.

        Returns:
        """
        # If these arguments are None, fall back to the config defaults: whether to output attentions, hidden states, and whether to return a dict-style output.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        # Call the LlamaModel decoder with the input arguments; `outputs` holds the model outputs.
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = outputs[0]  # last hidden states of the decoder

        # If model parallelism (pretraining_tp > 1) was used during pretraining, split the lm_head weight into pretraining_tp slices, compute the logits for each slice, and concatenate the results.
        if self.config.pretraining_tp > 1:
            lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
            logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
            logits = torch.cat(logits, dim=-1)
        else:  # Otherwise, compute the logits directly with lm_head.
            if labels is None and not is_torchdynamo_compiling():
                logger.warning_once(
                    "Starting from v4.46, the `logits` model output will have the same type as the model (except at train time, where it will always be FP32)"
                )
            # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
            # TODO: remove the float() operation in v4.46
            logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :]).float()
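            # Illustrative note (not part of the original source): with num_logits_to_keep=0 the slice
            # [:, -0:, :] equals [:, 0:, :], i.e. logits are computed for every position; with
            # num_logits_to_keep=1 (typical during generation) only the last position is projected.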

        # Loss computation
        loss = None
        if labels is not None:
            # Upcast to float if we need to compute the loss to avoid potential precision issues
            logits = logits.float()

            # shift_logits and shift_labels align the logits with the labels so that each position predicts the next token.
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()

            # Compute the cross-entropy loss between shift_logits and shift_labels
            loss_fct = CrossEntropyLoss()
            # Flatten the tokens
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)
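            # Worked example (illustrative, not part of the original source): for a 4-token sequence
            # [t0, t1, t2, t3], shift_logits keeps positions 0..2 and shift_labels keeps tokens t1..t3,
            # so the logits at position i are scored against the true next token t(i+1).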

        # Return the result
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    # Prepare the inputs for generation, making sure all the required data and parameters are organized correctly so that the generation loop can run smoothly.
    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        num_logits_to_keep=0,
        **kwargs,
    ):
        # If past_key_values is not None, the cache is in use. Slice `input_ids` with `cache_position`
        # to keep only the tokens that have not been processed yet.
        # Exception 1: when inputs_embeds are passed, input_ids may be missing entries.
        # Exception 2: some generation methods do special slicing of input_ids, so we don't need to handle it here.
        if past_key_values is not None:
            if inputs_embeds is not None:  # Exception 1
                input_ids = input_ids[:, -cache_position.shape[0]:]
            elif input_ids.shape[1] != cache_position.shape[0]:  # Default case (the "else", a no op, is Exception 2)
                input_ids = input_ids[:, cache_position]

        # If an attention_mask is provided but position_ids is not, position_ids are created on the fly during generation.
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1]:]

                # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s
                # `mode="reduce-overhead"`, as otherwise the input `position_ids` would have various
                # strides during the decoding. Here, simply using `.contiguous()` is not sufficient, as
                # in the batch size = 1 case, `position_ids` is already contiguous but with varying
                # stride, which retriggers a capture.
                position_ids = position_ids.clone(memory_format=torch.contiguous_format)
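            # Worked example (illustrative, not part of the original source): attention_mask
            # [[0, 0, 1, 1, 1]] -> cumsum(-1) - 1 = [[-1, -1, 0, 1, 2]] -> masked_fill on the padded
            # positions gives [[1, 1, 0, 1, 2]], so the real tokens get positions 0, 1, 2.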

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and cache_position[0] == 0:
            model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None}
        else:
            # The clone here is for the same reason as for `position_ids`.
            model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None}

        # If past_key_values is a StaticCache and attention_mask is 2D
        if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:
            # Get batch_size, sequence_length and device from inputs_embeds or input_ids
            if model_inputs["inputs_embeds"] is not None:
                batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
                device = model_inputs["inputs_embeds"].device
            else:
                batch_size, sequence_length = model_inputs["input_ids"].shape
                device = model_inputs["input_ids"].device

            # Get the dtype of lm_head and the minimum representable value for that dtype
            dtype = self.lm_head.weight.dtype
            min_dtype = torch.finfo(dtype).min

            # Prepare the 4D causal attention mask with cache_position
            attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(
                attention_mask,
                sequence_length=sequence_length,
                target_length=past_key_values.get_max_length(),
                dtype=dtype,
                device=device,
                min_dtype=min_dtype,
                cache_position=cache_position,
                batch_size=batch_size,
            )
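            # Shape note (illustrative assumption, not from the original source): the prepared mask is
            # expected to be 4D, roughly (batch_size, 1, sequence_length, target_length), with masked
            # positions filled with min_dtype so they contribute nothing after the softmax.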

        # Update the model input dict with position_ids, cache_position, past_key_values, etc.
        model_inputs.update(
            {
                "position_ids": position_ids,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "use_cache": use_cache,
                "attention_mask": attention_mask,
                "num_logits_to_keep": num_logits_to_keep,
            }
        )
        return model_inputs  # return the assembled model input dict
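
To tie the two pieces of the class together, the sketch below reproduces a single greedy decoding step by hand: it runs the inner LlamaModel (self.model) to obtain hidden states, then applies lm_head to the last position, mirroring what forward() does with num_logits_to_keep=1. This is an illustrative sketch that assumes the `model` and `tokenizer` from section 1 are already loaded; it is not part of the library source.

import torch

inputs = tokenizer("Hey, are you conscious?", return_tensors="pt")

with torch.no_grad():
    # Run the decoder stack (self.model above); index 0 is the last hidden states.
    decoder_outputs = model.model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask)
    hidden_states = decoder_outputs[0]                           # (batch, seq_len, hidden_size)

    # Project only the last position to vocabulary logits, as forward() does with
    # hidden_states[:, -num_logits_to_keep:, :] when num_logits_to_keep=1.
    next_token_logits = model.lm_head(hidden_states[:, -1, :])   # (batch, vocab_size)
    next_token_id = next_token_logits.argmax(dim=-1)             # greedy next-token choice

print(tokenizer.decode(next_token_id))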