Large Language Models

Set up and run your own large language model on the 驱动云 cloud platform:

Environment setup:

!pip install transformers==4.37.0 -U
!pip install streamlit
# install the required packages
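
Before loading the model, you can optionally sanity-check the environment. This is a minimal sketch: the pinned version matches the install above, and a visible GPU is an assumption about the cloud instance.

import torch
import transformers

print(transformers.__version__)   # expect 4.37.0, as pinned above
print(torch.cuda.is_available())  # should be True on a GPU instance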

Basic version:

from transformers import AutoModelForCausalLM, AutoTokenizer
device = "cuda" # the device to load the model onto

model = AutoModelForCausalLM.from_pretrained(
    "/gemini/pretrain/Qwen2-0.5B-Instruct",
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("/gemini/pretrain/Qwen2-0.5B-Instruct")

prompt = "你是谁?"  # "Who are you?"
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(device)

generated_ids = model.generate(
    model_inputs.input_ids,
    max_new_tokens=512
)
# Strip the prompt tokens so that only the newly generated tokens are decoded
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
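
For reference, apply_chat_template with add_generation_prompt=True renders the messages into Qwen2's ChatML-style prompt, roughly as follows (a sketch of the expected rendering, not captured output):

<|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
你是谁?<|im_end|>
<|im_start|>assistant

The model continues from the trailing assistant header, which is why the list comprehension above slices off the prompt tokens before decoding.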

Streaming output version:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread

device = "cuda"

model_name = "/gemini/pretrain/Qwen2-0.5B-Instruct"
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
tokenizer = AutoTokenizer.from_pretrained(model_name)
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
]


def stream_generate(prompt, model, tokenizer, device):
    messages.append({"role": "user", "content": prompt})
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(device)
    generate_params = dict(
        inputs=model_inputs.input_ids,
        max_new_tokens=512,
        do_sample=True,
        top_k=50,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,
        streamer=streamer
    )
    
    thread = Thread(target=model.generate, kwargs=generate_params)
    thread.start()

    generated_text = ""
    for new_text in streamer:
        generated_text += new_text
        print(new_text, end='', flush=True)
    print()
    # Record the reply in the history with the assistant role so later turns can see it
    messages.append({"role": "assistant", "content": generated_text})

# Multi-turn conversation loop
while True:
    user_input = input("User: ")
    if user_input.lower() == 'exit':
        print("Exiting...")
        break
    
    # Generate the reply and stream it to the console
    print("Assistant: ", end="")
    stream_generate(user_input, model, tokenizer, device)
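
Note on the design: model.generate blocks until the full sequence is finished, so it runs in a background Thread. TextIteratorStreamer decodes tokens as they are produced and pushes them onto an internal queue, which the for loop in the main thread consumes as an iterator, so text is printed while generation is still in progress.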

Build a visual interface with Streamlit:

import streamlit as st
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
import torch

st.title('Qwen2-0.5B-Instruct')

device = "cuda"
model_name = "/gemini/pretrain/Qwen2-0.5B-Instruct"

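# Streamlit reruns this script from the top on every interaction;
# @st.cache_resource keeps the model and tokenizer loaded once per
# server process instead of reloading them on each rerun.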
@st.cache_resource
def get_model():  
    model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer

model, tokenizer = get_model()
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

def generate_response(input_text):
    # Wrap the user's question in a Chinese instruction prompt; the instruction
    # reads: "You are an intelligent Q&A assistant; here is the question you need to answer:"
    prompt = f'''
    你是一个智能问答助手,这是你需要回答的问题:
    <question>
    {input_text}
    </question>
    '''

    print(prompt)  # debug: log the composed prompt to the server console
    # Build the message list
    messages = [{"role": "user", "content": prompt}]

    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(device)
    generate_params = dict(
        inputs=model_inputs.input_ids,
        max_new_tokens=512,
        do_sample=True,
        top_k=50,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,
        streamer=streamer
    )
    
    thread = Thread(target=model.generate, kwargs=generate_params)
    thread.start()

    return streamer

with st.form('my_form'):
    text = st.text_area('Enter text:', 'What are the three key pieces of advice for learning how to code?')
    submitted = st.form_submit_button('Submit')
    if submitted:
        message_placeholder = st.empty()
        # Call the model and stream back the response
        streamer = generate_response(text)
        response = ""
        for chunk in streamer:
            response += chunk
            message_placeholder.info(response + "▌")
    
        message_placeholder.info(response)
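
Save the script (for example as app.py; the filename is arbitrary) and launch it with Streamlit. The address and port below are assumptions; use whichever port your platform exposes:

streamlit run app.py --server.address 0.0.0.0 --server.port 8501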
