# 1. openai
from openai import OpenAI
def chatgpt_api(query):
    """Send *query* to the OpenAI chat completions API and return the reply text.

    Args:
        query: The user prompt; formatted into the single user message.

    Returns:
        str: The assistant's reply content from the first choice.
    """
    import os

    client = OpenAI(
        base_url='https://api.openai-proxy.live/v1',
        # SECURITY: read the key from the environment instead of hard-coding
        # a secret in source; the literal is only a placeholder fallback.
        api_key=os.getenv('OPENAI_API_KEY', 'sk-xxxxxxxxxxxxxxxxxxx'),
    )
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": f"{query}",
            }
        ],
        model="gpt-3.5-turbo",
    )
    return chat_completion.choices[0].message.content


print(chatgpt_api('你是谁'))
# 2. chatglm
import requests
def chatglm_api(query):
    """Query the THUDM/glm-4-9b-chat model via the SiliconFlow API.

    Args:
        query: The user prompt; formatted into the single user message.

    Returns:
        list: The ``choices`` list from the JSON response body.
    """
    url = "https://api.siliconflow.cn/v1/chat/completions"
    payload = {
        "model": "THUDM/glm-4-9b-chat",
        "messages": [
            {
                "role": "user",
                "content": f"{query}"
            }
        ]
    }
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        # SECURITY: move this token out of source (env var / config file).
        "authorization": "Bearer sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
    }
    response = requests.post(url, json=payload, headers=headers)
    # Parse with the JSON parser: eval() on a network response can execute
    # arbitrary code and also fails on JSON literals (true/false/null).
    return response.json()['choices']


print(chatglm_api('你是谁啊'))
# 3. Qwen
import requests
def qwen_api(query):
    """Query the Qwen/Qwen2-72B-Instruct model via the SiliconFlow API.

    Args:
        query: The user prompt; formatted into the single user message.

    Returns:
        list: The ``choices`` list from the JSON response body.
    """
    url = "https://api.siliconflow.cn/v1/chat/completions"
    payload = {
        "model": "Qwen/Qwen2-72B-Instruct",
        "messages": [
            {
                "role": "user",
                "content": f"{query}"
            }
        ]
    }
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        # SECURITY: move this token out of source (env var / config file).
        "authorization": "Bearer sk-xxxxxxxxxxxxxxxxxxx"
    }
    response = requests.post(url, json=payload, headers=headers)
    # Parse with the JSON parser: eval() on a network response can execute
    # arbitrary code and also fails on JSON literals (true/false/null).
    return response.json()['choices']


# Fixed copy-paste bug: the demo previously called chatglm_api here.
print(qwen_api('你是谁啊'))
# 4. Baichuan
def baichuan_api(query):
    """Send *query* to the Baichuan2-Turbo chat completions API.

    Args:
        query: The user prompt; formatted into the single user message.

    Returns:
        dict | None: The parsed JSON response on HTTP 200; otherwise the
        error is printed and None is returned.
    """
    api_key = 'sk-xxxxxxxxxxxxxxxxxxxxxxxxx'
    # Endpoint, headers, and request body.
    endpoint = "https://api.baichuan-ai.com/v1/chat/completions"
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    body = {
        "model": "Baichuan2-Turbo",
        "messages": [
            {
                "role": "user",
                "content": f"{query}",
            }
        ],
        "temperature": 0.3,
        "top_p": 0.85,
        "max_tokens": 2048,
        "with_search_enhance": True,
        "knowledge_base": {"ids": []},
        "stream": False,
    }
    # Issue the POST request.
    resp = requests.post(endpoint, headers=request_headers, json=body)
    # Guard clause: report failures and signal them with None.
    if resp.status_code != 200:
        print(f"Error: {resp.status_code} - {resp.text}")
        return None
    return resp.json()
# 5. 01 (Yi)
import requests
def onezero_api(query):
    """Query the 01-ai/Yi-1.5-34B-Chat-16K model via the SiliconFlow API.

    Args:
        query: The user prompt; formatted into the single user message.

    Returns:
        list: The ``choices`` list from the JSON response body.
    """
    url = "https://api.siliconflow.cn/v1/chat/completions"
    payload = {
        "model": "01-ai/Yi-1.5-34B-Chat-16K",
        "messages": [
            {
                "role": "user",
                "content": f"{query}"
            }
        ]
    }
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        # SECURITY: move this token out of source (env var / config file).
        "authorization": "Bearer sk-xxxxxxxxxxxxxxxxxxx"
    }
    response = requests.post(url, json=payload, headers=headers)
    # Parse with the JSON parser: eval() on a network response can execute
    # arbitrary code and also fails on JSON literals (true/false/null).
    return response.json()['choices']


# Fixed copy-paste bug: the demo previously called chatglm_api here.
print(onezero_api('你是谁啊'))
# 6. iFlytek Spark (星火认知)
# Requires: pip install --upgrade spark_ai_python
from sparkai.llm.llm import ChatSparkLLM, ChunkPrintHandler
from sparkai.core.messages import ChatMessage
# URL for the Spark Max model; URL values for other model versions are listed
# in the docs (https://www.xfyun.cn/doc/spark/Web.html)
SPARKAI_URL = 'wss://spark-api.xf-yun.com/v3.5/chat'
# Spark API credentials; obtain them from the iFlytek open-platform console
# (https://console.xfyun.cn/services/bm35)
SPARKAI_APP_ID = ''
SPARKAI_API_SECRET = ''
SPARKAI_API_KEY = ''
# domain value for the Spark Max model; domain values for other model versions
# are listed in the docs (https://www.xfyun.cn/doc/spark/Web.html)
SPARKAI_DOMAIN = 'generalv3.5'
if __name__ == '__main__':
    # Build a non-streaming Spark LLM client from the module-level settings.
    client = ChatSparkLLM(
        spark_api_url=SPARKAI_URL,
        spark_app_id=SPARKAI_APP_ID,
        spark_api_key=SPARKAI_API_KEY,
        spark_api_secret=SPARKAI_API_SECRET,
        spark_llm_domain=SPARKAI_DOMAIN,
        streaming=False,
    )
    # One conversation consisting of a single user turn.
    conversation = [ChatMessage(role="user", content='你好呀')]
    result = client.generate([conversation], callbacks=[ChunkPrintHandler()])
    print(result)
# 7. Deepseek
import requests
def seepseek_api(query):
    """Query the deepseek-ai/DeepSeek-V2-Chat model via the SiliconFlow API.

    NOTE: the name is a typo for "deepseek_api"; kept for backward
    compatibility with existing callers.

    Args:
        query: The user prompt; formatted into the single user message.

    Returns:
        list: The ``choices`` list from the JSON response body.
    """
    url = "https://api.siliconflow.cn/v1/chat/completions"
    payload = {
        "model": "deepseek-ai/DeepSeek-V2-Chat",
        "messages": [
            {
                "role": "user",
                "content": f"{query}"
            }
        ]
    }
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        # SECURITY: move this token out of source (env var / config file).
        "authorization": "Bearer sk-xxxxxxxxxxxxxxxxxxx"
    }
    response = requests.post(url, json=payload, headers=headers)
    # Parse with the JSON parser: eval() on a network response can execute
    # arbitrary code and also fails on JSON literals (true/false/null).
    return response.json()['choices']


# Fixed copy-paste bug: the demo previously called chatglm_api here.
print(seepseek_api('你是谁啊'))