llama

Question 1: How do you run the models larger than 7B?

Running the 13B weights on a single A100
  1. Merge the two shard files consolidated.00.pth and consolidated.01.pth.
    Command: python merge_weights.py --input_dir <the LLaMA download directory; the script looks for the 00/01 .pth shards inside its 13B subfolder> --model_size 13B. The merged weights are written to merged.pth.
# Original copyright by Jason Phang
# https://github.com/zphang
# Taken from here:
# https://github.com/huggingface/transformers/pull/21955/commits/8978f28e6c44b083c0b190d3931902c2904c940a#diff-110a445233a8b15a0875998eeaf75cb8607b38a5daa736291dd058766879bbdd

import argparse
import json
import os
import shutil
import torch

"""
Sample usage:
    ```
    python merge_weights.py --input_dir D:\Downloads\LLaMA --model_size 13B
    ```
"""

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
}

NUM_SHARDS = {
    "7B": 1,
    "13B": 2,
    "30B": 4,
    "65B": 8,
}


def read_json(path):
    with open(path, "r") as f:
        return json.loads(f.read())


def write_model(input_base_path, model_size):
    assert model_size in INTERMEDIATE_SIZE_MAP

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads

    # Load weights
    if model_size == "7B":
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]

    state_dict = {}

    for layer_i in range(n_layers):
        if model_size == "7B":
            # The 7B checkpoint has a single shard, so its tensors can be copied through unchanged.
            state_dict |= {
                f"layers.{layer_i}.attention.wq.weight": loaded[
                    f"layers.{layer_i}.attention.wq.weight"
                ],
                f"layers.{layer_i}.attention.wk.weight": loaded[
                    f"layers.{layer_i}.attention.wk.weight"
                ],
                f"layers.{layer_i}.attention.wv.weight": loaded[
                    f"layers.{layer_i}.attention.wv.weight"
                ],
                f"layers.{layer_i}.attention.wo.weight": loaded[
                    f"layers.{layer_i}.attention.wo.weight"
                ],
                f"layers.{layer_i}.feed_forward.w1.weight": loaded[
                    f"layers.{layer_i}.feed_forward.w1.weight"
                ],
                f"layers.{layer_i}.feed_forward.w2.weight": loaded[
                    f"layers.{layer_i}.feed_forward.w2.weight"
                ],
                f"layers.{layer_i}.feed_forward.w3.weight": loaded[
                    f"layers.{layer_i}.feed_forward.w3.weight"
                ],
                f"layers.{layer_i}.attention_norm.weight": loaded[
                    f"layers.{layer_i}.attention_norm.weight"
                ],
                f"layers.{layer_i}.ffn_norm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # RMSNorm weights are replicated on every shard, so shard 0's copy suffices;
            # the projection matrices below must be re-assembled from all shards.
            state_dict.update({
                f"layers.{layer_i}.attention_norm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ],
                f"layers.{layer_i}.ffn_norm.weight": loaded[0][f"layers.{layer_i}.ffn_norm.weight"],
            })
            # wq/wk/wv are split across attention heads (column-parallel): view each shard
            # as (heads_per_shard, head_dim, dim), concatenate the head blocks along dim 0,
            # then flatten back into a full (dim, dim) projection.
            state_dict[f"layers.{layer_i}.attention.wq.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(dim, dim)
            state_dict[f"layers.{layer_i}.attention.wk.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(n_heads_per_shard, dims_per_head, dim)
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(dim, dim)
            state_dict[f"layers.{layer_i}.attention.wv.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(n_heads_per_shard, dims_per_head, dim)
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(dim, dim)
            # wo is row-parallel (split along its input dimension), so concatenate along dim 1.
            state_dict[f"layers.{layer_i}.attention.wo.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            # w1 and w3 are column-parallel (split along the intermediate dimension, dim 0);
            # w2 is row-parallel (split along the input dimension, dim 1).
            state_dict[f"layers.{layer_i}.feed_forward.w1.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"layers.{layer_i}.feed_forward.w2.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"layers.{layer_i}.feed_forward.w3.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

    if model_size == "7B":
        state_dict |= {
            "tok_embeddings.weight": loaded["tok_embeddings.weight"],
            "norm.weight": loaded["norm.weight"],
            "output.weight": loaded["output.weight"],
        }
    else:
        # norm is replicated; the token embedding is split along the embedding
        # dimension and the output projection along the vocabulary dimension.
        state_dict.update({
            "norm.weight": loaded[0]["norm.weight"],
            "tok_embeddings.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "output.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        })

    # Write the merged single-shard checkpoint to the current working directory.
    torch.save(state_dict, "merged.pth")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "13B", "30B", "65B"],
    )
    args = parser.parse_args()

    write_model(
        input_base_path=os.path.join(args.input_dir, args.model_size),
        model_size=args.model_size,
    )


if __name__ == "__main__":
    main()

Afterwards, run it with torchrun --nproc_per_node 1 example.py --ckpt_dir <directory containing the merged weight file> --tokenizer_path tokenizer.model (a quick sanity check of the merged checkpoint is sketched after this list).
2. Running on two GPUs
To be added.
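
Before launching torchrun it can help to sanity-check the merged checkpoint. The snippet below is a minimal sketch, not part of the original script: it assumes merged.pth and the 13B params.json are reachable from the current directory (adjust the two paths to your layout) and simply verifies that the re-assembled attention projections are full (dim, dim) matrices.

import json
import torch

# Assumed paths: the file written by merge_weights.py and the params.json
# that ships with the 13B shards. Adjust to your own layout.
ckpt_path = "merged.pth"
params_path = "params.json"

with open(params_path) as f:
    params = json.load(f)
dim = params["dim"]  # 5120 for 13B

state_dict = torch.load(ckpt_path, map_location="cpu")

# After merging, every attention projection should be a full (dim, dim) matrix.
for layer_i in range(params["n_layers"]):
    wq = state_dict[f"layers.{layer_i}.attention.wq.weight"]
    assert wq.shape == (dim, dim), (layer_i, tuple(wq.shape))

print("tok_embeddings:", tuple(state_dict["tok_embeddings.weight"].shape))
print("output:", tuple(state_dict["output.weight"].shape))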

Question 2: The same prompt fed into LLaMA yields the same answer every time, no matter how many times it is run

Every LLaMA model except 7B runs with multi-GPU model parallelism, so to make the parallel computation reproducible (and identical) on every GPU, the random-number seed is fixed with torch.manual_seed(1). See setup_model_parallel() in example.py:

from typing import Tuple
import os
import torch
from fairscale.nn.model_parallel.initialize import initialize_model_parallel


def setup_model_parallel() -> Tuple[int, int]:
    local_rank = int(os.environ.get("LOCAL_RANK", -1))
    world_size = int(os.environ.get("WORLD_SIZE", -1))

    torch.distributed.init_process_group("nccl")
    initialize_model_parallel(world_size)
    torch.cuda.set_device(local_rank)

    # seed must be the same in all processes
    torch.manual_seed(1)
    return local_rank, world_size
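
As a toy illustration (this is not code from the llama repo): with a fixed seed, torch's samplers are deterministic, so the temperature/top-p sampling during generation picks the same tokens for the same prompt on every run. Re-seeding differently (e.g. from the clock) would vary the outputs, but in the multi-GPU case every rank still has to share the same seed, otherwise the ranks would sample different tokens and desynchronize.

import time
import torch

logits = torch.tensor([2.0, 1.0, 0.5, 0.1])   # toy next-token logits
probs = torch.softmax(logits / 0.8, dim=-1)   # temperature scaling, 0.8 just for illustration

def sample(seed: int) -> int:
    torch.manual_seed(seed)
    return int(torch.multinomial(probs, num_samples=1))

# Fixed seed, as in setup_model_parallel(): every run draws the same token.
print([sample(1) for _ in range(5)])          # five identical indices

# A per-run seed (e.g. from the clock) gives varying samples, but all
# model-parallel ranks would still need to agree on it.
print(sample(int(time.time())))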
