Deploying Qwen large models with vLLM for accelerated inference

Table of Contents

step1: Write vllm_wrapper.py

step2: Application scenario: generate health advice for physical-exam indicators


step1: Write vllm_wrapper.py

from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList
from typing import Optional, Callable, List, Tuple, Union
import copy
import torch
from transformers import AutoTokenizer
from transformers.generation.logits_process import LogitsProcessorList
from packaging import version

_ERROR_BAD_CHAT_FORMAT = """\
We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml".
If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat().
我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。
如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。
"""

IMEND = "<|im_end|>"
ENDOFTEXT = "<|endoftext|>"

HistoryType = List[Tuple[str, str]]
TokensType = List[int]
BatchTokensType = List[List[int]]

def get_stop_words_ids(chat_format, tokenizer):
    if chat_format == "raw":
        stop_words_ids = [tokenizer.encode("Human:"), [tokenizer.eod_id]]
    elif chat_format == "chatml":
        stop_words_ids = [[tokenizer.im_end_id], [tokenizer.im_start_id]]
    else:
        raise NotImplementedError(f"Unknown chat format {chat_format!r}")
    return stop_words_ids

def make_context(
    tokenizer: PreTrainedTokenizer,
    query: str,
    history: List[Tuple[str, str]] = None,
    system: str = "",
    max_window_size: int = 6144,
    chat_format: str = "chatml",
):
    if history is None:
        history = []

    if chat_format == "chatml":
        im_start, im_end = "<|im_start|>", "<|im_end|>"
        im_start_tokens = [tokenizer.im_start_id]
        im_end_tokens = [tokenizer.im_end_id]
        nl_tokens = tokenizer.encode("\n")

        def _tokenize_str(role, content):
            return f"{role}\n{content}", tokenizer.encode(
                role, allowed_special=set()
            ) + nl_tokens + tokenizer.encode(content, allowed_special=set())

        system_text, system_tokens_part = _tokenize_str("system", system)
        system_tokens = im_start_tokens + system_tokens_part + im_end_tokens

        raw_text = ""
        context_tokens = []

        for turn_query, turn_response in reversed(history):
            query_text, query_tokens_part = _tokenize_str("user", turn_query)
            query_tokens = im_start_tokens + query_tokens_part + im_end_tokens
            response_text, response_tokens_part = _tokenize_str(
                "assistant", turn_response
            )
            response_tokens = im_start_tokens + response_tokens_part + im_end_tokens

            next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens
            prev_chat = (
                f"\n{im_start}{query_text}{im_end}\n{im_start}{response_text}{im_end}"
            )

            current_context_size = (
                len(system_tokens) + len(next_context_tokens) + len(context_tokens)
            )
            if current_context_size < max_window_size:
                context_tokens = next_context_tokens + context_tokens
                raw_text = prev_chat + raw_text
            else:
                break

        context_tokens = system_tokens + context_tokens
        raw_text = f"{im_start}{system_text}{im_end}" + raw_text
        context_tokens += (
            nl_tokens
            + im_start_tokens
            + _tokenize_str("user", query)[1]
            + im_end_tokens
            + nl_tokens
            + im_start_tokens
            + tokenizer.encode("assistant")
            + nl_tokens
        )
        raw_text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"

    elif chat_format == "raw":
        raw_text = query
        context_tokens = tokenizer.encode(raw_text)
    else:
        raise NotImplementedError(f"Unknown chat format {chat_format!r}")

    return raw_text, context_tokens
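
# For a fresh query with no prior history, make_context assembles a ChatML prompt
# like the sketch below; context_tokens is the same text in token-id form, with
# <|im_start|> / <|im_end|> emitted as single special-token ids. The system prompt
# shown is the default used by chat() further down (this block is illustrative only):
#
#   <|im_start|>system
#   You are a helpful assistant.<|im_end|>
#   <|im_start|>user
#   你好<|im_end|>
#   <|im_start|>assistant
#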

class vLLMWrapper:
    def __init__(self,
               model_dir: str,
               trust_remote_code: bool = True,
               tensor_parallel_size: int = 1,
               gpu_memory_utilization: float = 0.98,
               dtype: str = "bfloat16",
               **kwargs):

        if dtype not in ("bfloat16", "float16", "float32"):
            raise ValueError(f"Unsupported dtype: {dtype!r}; expected bfloat16, float16 or float32")

        # build generation_config
        self.generation_config = GenerationConfig.from_pretrained(model_dir, trust_remote_code=trust_remote_code)

        # build tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
        self.tokenizer.eos_token_id = self.generation_config.eos_token_id

        self.stop_words_ids = []

        from vllm import LLM
        import vllm
        if version.parse(vllm.__version__) >= version.parse("0.2.2"):
            self.__vllm_support_repetition_penalty = True
        else:
            self.__vllm_support_repetition_penalty = False

        # kwargs is a dict, so use .get() rather than getattr() to read optional settings
        quantization = kwargs.get('quantization', None)

        self.model = LLM(model=model_dir,
                            tokenizer=model_dir,
                            tensor_parallel_size=tensor_parallel_size,
                            trust_remote_code=trust_remote_code,
                            quantization=quantization,
                            gpu_memory_utilization=gpu_memory_utilization,
                            dtype=dtype)

        for stop_id in get_stop_words_ids(self.generation_config.chat_format, self.tokenizer):
            self.stop_words_ids.extend(stop_id)
        self.stop_words_ids.extend([self.generation_config.eos_token_id])

    def chat(self,
        query: str,
        history: Optional[HistoryType],
        tokenizer: PreTrainedTokenizer = None,
        system: str = "You are a helpful assistant.",
        generation_config: Optional[GenerationConfig] = None,
        **kwargs):
        generation_config = generation_config if generation_config is not None else self.generation_config
        tokenizer = self.tokenizer if tokenizer is None else tokenizer

        assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
        if not self.__vllm_support_repetition_penalty and generation_config.repetition_penalty != 1:
            raise RuntimeError("The installed vLLM doesn't support repetition_penalty, please set ``model.generation_config.repetition_penalty = 1`` or install vllm>=0.2.2")

        if history is None:
            history = []
        else:
            # make a copy of the user's input so that it is left untouched
            history = copy.deepcopy(history)

        extra_stop_words_ids = kwargs.get('stop_words_ids', None)
        if extra_stop_words_ids is None:
            extra_stop_words_ids = []

        max_window_size = kwargs.get('max_window_size', None)
        if max_window_size is None:
            max_window_size = generation_config.max_window_size

        from vllm.sampling_params import SamplingParams
        sampling_kwargs = {
            "stop_token_ids": self.stop_words_ids,
            "early_stopping": False,
            "top_p": generation_config.top_p,
            "top_k": -1 if generation_config.top_k == 0 else generation_config.top_k,
            "temperature": generation_config.temperature,
            "max_tokens": generation_config.max_new_tokens,
            "repetition_penalty": generation_config.repetition_penalty
        }
        if not self.__vllm_support_repetition_penalty:
            sampling_kwargs.pop("repetition_penalty")
        sampling_params = SamplingParams(**sampling_kwargs)

        raw_text, context_tokens = make_context(
            self.tokenizer,
            query,
            history=history,
            system=system,
            max_window_size=max_window_size,
            chat_format=generation_config.chat_format,
        )

        req_outputs = self.model.generate([query],
                                            sampling_params=sampling_params,
                                            prompt_token_ids=[context_tokens])
        req_output = req_outputs[0]

        prompt_str = req_output.prompt
        prompt_ids = req_output.prompt_token_ids
        req_sample_output_ids = []
        req_sample_output_strs = []
        for sample in req_output.outputs:
            output_str = sample.text
            output_ids = sample.token_ids
            # strip trailing stop-token text from the decoded sample
            if output_str.endswith(IMEND):
                output_str = output_str[:-len(IMEND)]
            if output_str.endswith(ENDOFTEXT):
                output_str = output_str[:-len(ENDOFTEXT)]
            req_sample_output_ids.append(prompt_ids + output_ids)
            req_sample_output_strs.append(prompt_str + output_str)
        assert len(req_sample_output_strs) == 1
        response = req_sample_output_strs[0][len(prompt_str):]
        # store the plain (query, response) pair so make_context can rebuild the context next turn
        history.append((query, response))

        return response, history

if __name__ == '__main__':

    model_dir = 'Qwen/Qwen-72B-Chat'
    tensor_parallel_size = 2

    model = vLLMWrapper(model_dir,
                        tensor_parallel_size=tensor_parallel_size,
                        )

    response, history = model.chat(query="你好",
                                   history=None)
    print(response)
    response, history = model.chat(query="给我讲一个年轻人奋斗创业最终取得成功的故事。",
                                   history=history)
    print(response)
    response, history = model.chat(query="给这个故事起一个标题",
                                   history=history)
    print(response)

step2: Application scenario: generate health advice for physical-exam indicators


import pandas as pd
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
from vllm_wrapper import vLLMWrapper
from tqdm import tqdm

sft_path = "/mnt/sdd/Qwen-7B-Chat"
tokenizer = AutoTokenizer.from_pretrained(sft_path, trust_remote_code=True)
model = vLLMWrapper(sft_path, tensor_parallel_size=1)
# model = AutoModelForCausalLM.from_pretrained(sft_path, device_map="auto", trust_remote_code=True).eval()

import json
import uuid

def data_load():
    zb = pd.read_csv('/home/wangyp/Big_Model/infectious_disease/data/zb.csv', header=0)
    # print(zb.head(10))
    nums = []
    yins = {}
    nulls = []
    
    for item in zb.itertuples(index=False):
        # indicators with a textual "normal" description are collected in yins
        if pd.notna(item.normal_desc) and str(item.normal_desc).strip() != '':
            yins[item.NAME.strip()] = str(item.normal_desc).strip()
        # indicators with both a lower and an upper reference bound go to nums, the rest to nulls
        if (pd.notna(item.range_floor) and str(item.range_floor).strip() != ''
                and pd.notna(item.range_ceil) and str(item.range_ceil).strip() != ''):
            nums.append({item.NAME.strip(): str(item.range_floor).strip() + "-" + str(item.range_ceil).strip()})
        else:
            nulls.append(item.NAME.strip())
    return yins, nums, nulls
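
# data_load() assumes zb.csv carries at least the columns accessed above:
# NAME, normal_desc, range_floor, range_ceil. A hypothetical row layout
# (values below are illustrative, not taken from the real file):
#
#   NAME,normal_desc,range_floor,range_ceil
#   乙肝表面抗原,阴性,,
#   血红蛋白,,115,150
#   尿酮,,,
#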

def zao_yins(yins):
    res = []
    for key in yins.keys():
        res.append( key + "这种体检指标阳性。")
        res.append( key + "的检测结果为阳性。")
        res.append( key + "的检测显示出阳性反应。")
        res.append( key + "检测结果为阳性。")
        res.append( key + "的阳性结果在体检中被检测到。")
        res.append( key + "试验为阳性状态")
        res.append( key + "的阳性结果在体检中得到了确认。")
        res.append( key + "的检测结果表明为阳性。")
        res.append( "在进行检测" + key + "指标时,结果被判定为阳性。")
    return res

def zao_nums(nums):
    res = []
    keys_list = [key for d in nums for key in d.keys()]
    for key in keys_list:
        res.append({"name": key, "value": key + "检测结果显示异常。"})
        # res.append({"name": key, "value": key + "的检查值超出了正常范围。"})
        res.append({"name": key, "value": key + "的测量值与标准值不符。"})
        # res.append({"name": key, "value": key + "检测结果呈现异常状态。"})
        res.append({"name": key, "value": key + "的数值在体检中被标记为异常。"})
        # res.append({"name": key, "value": key + "检查结果显示了不正常的数值。"})
        res.append({"name": key, "value": key + "的检测结果不在正常参考值内。"})
        # res.append({"name": key, "value": key + "检查报告提示数值异常。"})
        # res.append({"name": key, "value": "体检报告指出" + key + "水平不正常。"})
        res.append({"name": key, "value": "体检中发现" + key + "水平异常。"})
        # res.append(key + "检测结果显示异常。")

    return res
# Prompt used to produce the variants: given e.g. "an abnormal urine-ketone level was found in the exam", generate 10 descriptions with the same meaning
def z_nulls(nulls):
    res = []
    for key in nulls:
        res.append("体检结果显示" + key + "水平出现异常。")
        res.append("在进行体检时,发现" + key + "的数值不在正常范围内。")
        res.append("体检报告中指出" + key + "水平有异常情况。")
        res.append("体检时," + key + "水平的测定结果超出了预期的正常值。")
        res.append("体检中测得的" + key + "水平与正常值有所偏差。")
        res.append("体检数据中," + key + "的数值检测出异常。")
        res.append(key + "的检测结果表明存在异常。")
        res.append(key + "的检测值在体检中被标记为异常。")
        res.append(key + "水平的体检结果提示有异常。")
    return res

yins_template = """

"""

# 定义一个带有槽位的字符串模板
# yins_template = "Hello, {name}! You are {age} years old."
yins_template = """hhh******************"""

nums_template = """
{disc}你是一名体检报告领域的专家,请生成一段关于该体检指标异常的改善建议。\n下面是生成体检指标相关的建议时的要求:健康建议严格包含如下几项:复检确认、营养评估、医疗咨询、健康饮食、生活方式调整、药物治疗、定期监测、记录症状这几项。生成建议请参考以下格式:\n体检结果提示您的{name}不在正常参考值内,这可能与多种因素有关,包括营养不良、维生素缺乏或某些疾病状态。以下是一些建议:\n复检确认:{name}相关的复检建议。\n营养评估:考虑针对{name}进行一次全面的营养评估。\n医疗咨询:咨询医生,以确定是否需要进一步的检查和{name}相关的其他检测。如血红蛋白电泳、血清铁蛋白、维生素B12和叶酸水平检测。\n健康饮食:饮食建议,这些食物富含补充{name}必要的营养素。\n生活方式调整:保持适度的体育活动,避免饮酒和吸烟,这些都可能影响{name}的健康。\n药物治疗:如果医生建议,可能需要服用补充剂或药物来纠正{name}异常。\n定期监测:根据医生的建议,定期监测{name}和其他{name}相关指标。\n记录症状:注意任何可能与{name}相关的症状,如疲劳、头晕或呼吸困难,并及时告知医生。\n请记住,{name}的异常可能是多种情况的指标,因此重要的是遵循医疗专业人员的指导进行进一步的评估和治疗。\n
"""
# Example of filling both slots (mirrors the call inside the main loop below):
# filled_nums_template = nums_template.format(disc=num["value"], name=num["name"])


def load_model():
    # unused placeholder; the vLLM model is created at module import time above
    pass
    
if __name__ == '__main__':
    all = []
    yins, nums, nulls = data_load()
    # for each field, build ~10 templated descriptions, store them in a list, and write them to a file
    yins_tem = zao_yins(yins)
    nums_tem = zao_nums(nums)
    nulls_tem = z_nulls(nulls)
    # all = yins_tem + nums_tem + nulls_tem
    # print(len(all))
    nums_conversations = []
    for num in tqdm(nums_tem):
        filled_nums_template = nums_template.format(disc=num["value"], name=num["name"])
        response, history = model.chat(filled_nums_template, history=None)
        nums_conversations.append({"id": str(uuid.uuid4()), "conversations": [{"from": "user", "value": num["value"]}, {"from": "assistant", "value": response}]})

    # write the conversations out as a JSON array
    with open("/home/wangyp/Big_Model/infectious_disease/data/zb_train.json", "w", encoding="utf-8") as f:
        json.dump(nums_conversations, f, ensure_ascii=False, indent=2)

    print("nums_conversations processing finished.")
