To do: hook this up to LangChain later (a minimal sketch is at the end of this note).
import streamlit as st
from modelscope import AutoModelForCausalLM, AutoTokenizer
# Load the model and tokenizer (CPU only)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-1_8B-Chat", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen-1_8B-Chat",
device_map="cpu",
trust_remote_code=True
).eval()  # device_map="cpu" already keeps everything on the CPU; just switch to eval mode
def generate_response(input_text):
    # Use the chat() helper shipped with Qwen's remote code; it applies the chat
    # template for us (raw tokenizer.encode + model.generate would skip the template)
    response, _ = model.chat(tokenizer, input_text, history=None)
    return response
# Input box and output area for the Streamlit app
st.title("Qwen-1.8B Chat")
user_input = st.text_input("Enter your message:")
if user_input:
    response = generate_response(user_input)
    st.write("Model reply:", response)
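To try it, save the script (the file name qwen_st.py below is just a placeholder) and launch it with Streamlit:

streamlit run qwen_st.py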
On CPU the Streamlit app was too slow to be usable, so I ran the model with Gradio instead:
import gradio as gr
from loguru import logger
from modelscope import AutoModelForCausalLM, AutoTokenizer

# Conversation history shared across calls; model.chat() keeps it updated
history = None
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-1_8B-Chat", revision='master', trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-1_8B-Chat", revision='master', device_map="auto", trust_remote_code=True).eval()
logger.debug('model loaded')
def predict(text):
    global history
    # model.chat() returns the reply and the updated conversation history
    response, history = model.chat(tokenizer, text, history=history)
    logger.debug(f'history: {history}')
    history_str = [f'Round {i}\nUser: {q}\nModel: {a}\n' for i, (q, a) in enumerate(history)]
    return response, ''.join(history_str)
# Build the interface
iface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(label="Current input"),
    outputs=[gr.Textbox(label="Current reply"), gr.Textbox(label="Full conversation")],
    title="Qwen-1.8B-Chat",
)
# Launch the web UI (Gradio serves on http://127.0.0.1:7860 by default)
iface.launch()
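Run it with python and open the local URL that Gradio prints.

As for the LangChain plan at the top: one way in is to wrap model.chat() in a custom LLM subclass, so chains and prompt templates can call the local model like any other LLM. A minimal sketch, assuming the langchain package is installed and the model/tokenizer above are already loaded; the class name QwenLLM and the stateless single-turn call are my own choices, not part of the original code:

from typing import Any, List, Optional
from langchain.llms.base import LLM

class QwenLLM(LLM):
    # Thin LangChain wrapper around the already-loaded Qwen model

    @property
    def _llm_type(self) -> str:
        return "qwen-1.8b-chat"

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        # Stateless single-turn call; history=None means each request starts fresh
        response, _ = model.chat(tokenizer, prompt, history=None)
        return response

llm = QwenLLM()
print(llm.invoke("你好"))  # on older langchain versions, call llm("你好") instead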