Introduction
Hands-on
Use modelscope to download the Qwen-7B model and deploy it as an online API with FastAPI;
Use the history of previous question-answer rounds to implement multi-turn dialogue.
Building the API
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "0"  # expose only GPU 0

import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
app = FastAPI()
class Query(BaseModel):
    text: str
    history: list = []
model_name = "qwen/Qwen-7B-Chat"
@app.post("/chat/")
async def chat(query: Query):
    global model, tokenizer  # use the globally loaded model and tokenizer inside the handler
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": query.text}
    ]
    # Building the prompt template by hand is optional here; model.chat
    # applies Qwen's chat template internally, so either way works.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    response, history = model.chat(
        tokenizer,
        text,
        history=query.history,
        max_length=2048,   # maximum total sequence length; defaults to 2048
        top_p=0.7,         # nucleus-sampling threshold; defaults to 0.7
        temperature=0.95   # sampling temperature; defaults to 0.95
    )
    return {
        "result": response,
        "history": history
    }
# Entry point
if __name__ == '__main__':
    # Load the pretrained tokenizer and model
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(model_name,
                                                 device_map="auto",
                                                 trust_remote_code=True).eval()  # evaluation mode
    model.generation_config = GenerationConfig.from_pretrained(model_name, trust_remote_code=True)
    # Start the FastAPI app
    uvicorn.run(app, host='0.0.0.0', port=6006, workers=1)
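For reference, apply_chat_template renders the messages list into Qwen's ChatML prompt format, roughly the following (my reconstruction of the template, with {query.text} standing in for the user input); model.chat builds an equivalent prompt internally, which is why the manual template step is optional:

<|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
{query.text}<|im_end|>
<|im_start|>assistant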
Accessing the API
class Query(BaseModel):
    text: str
    history: list = []
The Query class defines the fields the request must carry: text, the current user input, and history, a list of [user, assistant] pairs from the earlier rounds.
curl
curl -X POST "http://127.0.0.1:6006/chat/" \
  -H 'Content-Type: application/json' \
  -d '{"text": "Do you know my name and age?", "history": [["Hello, I am Xiaoming, 18 years old this year.", "Hello, I am Qwen!"]]}'
The requests library
Passing the parameters with a requests POST:
import requests
import json

def get_completion(prompt, history=None):
    headers = {'Content-Type': 'application/json'}
    data = {
        "text": prompt,
        "history": history if history is not None else []
    }
    response = requests.post(
        url='http://127.0.0.1:6006/chat/',
        headers=headers,
        data=json.dumps(data))
    d = response.json()
    result, history = d['result'], d['history']
    return result, history

history = []
while True:
    key = input('>')
    if key == 'q':  # type q to quit
        break
    result, history = get_completion(key, history)
    print(result)
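Note that the API itself is stateless: every request carries the full conversation history, and the client loop above is responsible for feeding the history returned by the server back into the next request. Typing q at the prompt ends the session.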
Results
The multi-turn dialogue behaves as follows: Qwen-7B remembers the name and age supplied in the earlier turns, and it shows basic logical reasoning, inferring the year of birth from the age.
Qwen 0.5B
At inference time, Qwen 0.5B takes less than 3 GB of GPU memory, so an ordinary consumer-grade local GPU can run it; a small model like this is well suited for learning.
The Qwen 0.5B model has no model.chat method, so the multi-turn dialogue approach above cannot be used as-is (a sketch of how to rebuild it follows after the code below).
The complete code for deploying the Qwen 0.5B API is as follows:
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "0"  # expose only GPU 0

import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
from modelscope import AutoModelForCausalLM, AutoTokenizer
app = FastAPI()
class Query(BaseModel):
    text: str

model = AutoModelForCausalLM.from_pretrained(
    "qwen/Qwen1.5-0.5B-Chat",
    device_map="auto"
).eval()
tokenizer = AutoTokenizer.from_pretrained("qwen/Qwen1.5-0.5B-Chat")
@app.post("/chat/")
async def chat(query: Query):
    global model, tokenizer  # use the globally loaded model and tokenizer inside the handler
    prompt = query.text
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt}
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to('cuda')
    generated_ids = model.generate(
        model_inputs.input_ids,
        max_new_tokens=512
    )
    # Drop the prompt tokens, keeping only the newly generated part
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return {"result": response}
# Entry point
if __name__ == '__main__':
    uvicorn.run(app, host='0.0.0.0', port=6006, workers=1)  # start the app on the given host and port
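Although Qwen 0.5B lacks model.chat, multi-turn dialogue can still be rebuilt on top of apply_chat_template by folding the earlier [user, assistant] pairs into the messages list. Below is a minimal sketch of such an endpoint, meant to be appended to the 0.5B script above; the HistoryQuery model and the /chat_history/ route are my own naming, not part of any library:

class HistoryQuery(BaseModel):
    text: str
    history: list = []  # list of [user, assistant] pairs, same format as the 7B API

@app.post("/chat_history/")
async def chat_history(query: HistoryQuery):
    global model, tokenizer
    # Fold the earlier rounds into the messages list before the new user turn
    messages = [{"role": "system", "content": "You are a helpful assistant."}]
    for user_turn, assistant_turn in query.history:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": query.text})
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to('cuda')
    generated_ids = model.generate(
        model_inputs.input_ids,
        max_new_tokens=512
    )
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    # Return the updated history so the client can send it back on the next turn
    return {"result": response, "history": query.history + [[query.text, response]]}

With this in place, the requests client from the 7B section works against /chat_history/ unchanged, apart from the URL.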