Streaming Output in Python with the OpenAI Library
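Below is the complete server: a FastAPI app that proxies streaming chat completions from an OpenAI-compatible endpoint, exposes a text-to-speech endpoint backed by an external TTS API, and includes a small speech-recognition helper class.
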
import sys
import io

# Re-wrap stdout as UTF-8 so Chinese text prints correctly on consoles
# that default to another encoding (e.g. GBK on Windows).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')

import json
import uuid

import requests
import uvicorn
from openai import OpenAI
from fastapi import FastAPI, HTTPException
from fastapi.responses import Response, StreamingResponse
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# Allow cross-origin requests from any frontend during development.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
import speech_recognition as sr


class VoiceRecognizer:
    def __init__(self):
        self.recognizer = sr.Recognizer()
        self.microphone = sr.Microphone()
        # Calibrate once for ambient noise so listen() has a sane threshold.
        with self.microphone as source:
            self.recognizer.adjust_for_ambient_noise(source)

    def recognize_from_mic(self):
        with self.microphone as source:
            print("Listening...")
            audio = self.recognizer.listen(
                source,
                timeout=5,
                phrase_time_limit=5
            )
        return self.process_audio(audio)

    def recognize_from_file(self, audio_file):
        with sr.AudioFile(audio_file) as source:
            audio = self.recognizer.record(source)
        return self.process_audio(audio)

    def process_audio(self, audio):
        try:
            # show_all=True returns the raw recognition result (a dict with
            # all alternatives) rather than just the best transcript string.
            text = self.recognizer.recognize_google(
                audio,
                language='zh-CN',
                show_all=True
            )
            return text
        except sr.UnknownValueError:
            return "Could not recognize the speech"
        except sr.RequestError:
            return "Speech service request failed"
def read_json(file_path):
    """Load the service configuration from a JSON file."""
    with open(file_path, 'r', encoding='utf-8') as file:
        config = json.load(file)
    return config


config = read_json('config.json')
print(config)
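# config.json is assumed to look roughly like this (placeholder values;
# only the keys used below are shown):
# {
#   "al": {"api_key": "...", "base_url": "..."},
#   "hs": {"access_token": "...", "appid": "...", "cluster": "...",
#          "voice_type": "...", "emotion": "...", "language": "...",
#          "api_url": "..."}
# }
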
client = OpenAI(api_key=config['al']['api_key'], base_url=config['al']['base_url'])
async def process_openai_stream(response):
    """Relay the OpenAI streaming response chunk by chunk."""
    for chunk in response:
        if chunk.choices[0].delta.content is not None:
            yield chunk.choices[0].delta.content
@app.post("/chat")
async def chat(request:dict):
if "messages" not in request:
raise HTTPException(status_code=400, detail="messages is required")
messages = request["messages"]
try:
response = client.chat.completions.create(
model="deepseek-r1-distill-llama-70b",
messages=messages,
stream=True,
)
print(response)
return StreamingResponse(
content=process_openai_stream(response),
media_type="text/event-stream"
)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
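# Example request body for /chat (assumed minimal payload, in the
# standard OpenAI chat message format):
# {"messages": [{"role": "user", "content": "Hello"}]}
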
@app.post("/text_to_speech")
def text_to_speech(request:dict):
if "message" not in request:
raise HTTPException(status_code=400, detail="message is required")
message = request["message"]
try:
header = {"Authorization": f"Bearer;{config['hs']['access_token']}"}
request_json = {
"app": {
"appid": config['hs']['appid'],
"token": "access_token",
"cluster": config['hs']['cluster']
},
"user": {
"uid": "388808087185088"
},
"audio": {
"voice_type": config['hs']['voice_type'],
"emotion": config['hs']['emotion'],
"language": config['hs']['language'],
"encoding": "mp3",
"speed_ratio": 1.0,
"volume_ratio": 1.0,
"pitch_ratio": 1.0,
},
"request": {
"reqid": str(uuid.uuid4()),
"text": message,
"text_type": "plain",
"operation": "query",
"with_frontend": 1,
"frontend_type": "unitTson"
}
}
resp = requests.post(config['hs']['api_url'], json.dumps(request_json), headers=header)
return Response(content=json.dumps(resp.json()), media_type="application/json")
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
if __name__ == "__main__":
    uvicorn.run(app, port=8086)
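
To consume the /chat stream from Python, a minimal client sketch (a separate script, assuming the server above is running locally on port 8086) could look like this:

import requests

def stream_chat(prompt):
    payload = {"messages": [{"role": "user", "content": prompt}]}
    with requests.post("http://127.0.0.1:8086/chat", json=payload, stream=True) as resp:
        resp.raise_for_status()
        resp.encoding = "utf-8"  # the server streams UTF-8 text
        # chunk_size=None yields data as soon as it arrives; decode_unicode
        # converts the raw bytes to text using the encoding set above.
        for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
            if chunk:
                print(chunk, end="", flush=True)
    print()

if __name__ == "__main__":
    stream_chat("Hello")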