A simple walkthrough: multi-turn Q&A with the iFlytek Spark model and the Alibaba Cloud Tongyi Qianwen (Qwen) model

iFlytek Spark cognitive model:

Open the Spark page (讯飞星火认知大模型 on xfyun.cn), click the free API trial entry, and claim the free personal package.

You will be redirected to the iFlytek Open Platform (xfyun.cn). Open the console, create a new application, and you can then obtain the APPID, APISecret, and APIKey.

Multi-turn call code: fill the three values you just obtained (APPID, APISecret, APIKey) into the main section of the script and run it to start a multi-turn Q&A session. The listing is split into two files: the first part is the Spark WebSocket wrapper (saved as sparkAPI.py), and the second part is the chat loop that imports it. The code depends on the websocket-client package (pip install websocket-client), which provides the websocket module imported below.
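If you would rather not hard-code the three credentials, a minimal sketch along the lines below reads them from environment variables instead; the variable names SPARK_APPID, SPARK_API_SECRET and SPARK_API_KEY are only placeholders chosen for this example, not anything the platform prescribes.

# Optional sketch: load the Spark credentials from environment variables instead
# of hard-coding them. The variable names here are arbitrary placeholders.
import os

appid = os.environ.get("SPARK_APPID", "")
api_secret = os.environ.get("SPARK_API_SECRET", "")
api_key = os.environ.get("SPARK_API_KEY", "")

if not all((appid, api_secret, api_key)):
    raise RuntimeError("Please export SPARK_APPID, SPARK_API_SECRET and SPARK_API_KEY first.")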

# coding: utf-8
# Part 1: Spark WebSocket wrapper, saved as sparkAPI.py
import _thread as thread
import base64
import hashlib
import hmac
import json
import ssl
from datetime import datetime
from time import mktime
from urllib.parse import urlencode, urlparse
from wsgiref.handlers import format_date_time

import websocket  # provided by the websocket-client package

# Accumulated streamed reply of the current request; the chat script resets it
# before each call and reads the full answer afterwards.
answer = ""

class Ws_Param(object):
    # Initialize with the credentials and the target websocket URL
    def __init__(self, APPID, APIKey, APISecret, gpt_url):
        self.APPID = APPID
        self.APIKey = APIKey
        self.APISecret = APISecret
        self.host = urlparse(gpt_url).netloc
        self.path = urlparse(gpt_url).path
        self.gpt_url = gpt_url

    # Build the authenticated websocket URL
    def create_url(self):
        # RFC 1123 timestamp
        now = datetime.now()
        date = format_date_time(mktime(now.timetuple()))

        # String to be signed
        signature_origin = "host: " + self.host + "\n"
        signature_origin += "date: " + date + "\n"
        signature_origin += "GET " + self.path + " HTTP/1.1"

        # Sign it with HMAC-SHA256
        signature_sha = hmac.new(self.APISecret.encode('utf-8'), signature_origin.encode('utf-8'),
                                 digestmod=hashlib.sha256).digest()

        signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding='utf-8')

        authorization_origin = f'api_key="{self.APIKey}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"'

        authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')

        # Collect the authentication parameters
        v = {
            "authorization": authorization,
            "date": date,
            "host": self.host
        }
        # Append them as query parameters to form the final URL
        url = self.gpt_url + '?' + urlencode(v)
        # print(url)  # uncomment to inspect the generated URL while debugging
        return url


# Called when the websocket reports an error
def on_error(ws, error):
    print("### error:", error)


# Called when the websocket is closed (newer websocket-client versions also pass
# a close status code and message, hence *args)
def on_close(ws, *args):
    print("### closed ###")


# Called once the websocket connection is established
def on_open(ws):
    thread.start_new_thread(run, (ws,))


def run(ws, *args):
    # Send the whole (trimmed) conversation history so the model sees the full context
    data = json.dumps(gen_params(appid=ws.appid, messages=ws.messages, domain=ws.domain))
    ws.send(data)


# Called for every websocket message; Spark streams the reply in chunks
def on_message(ws, message):
    global answer
    data = json.loads(message)
    code = data['header']['code']
    if code != 0:
        print(f'Request error: {code}, {data}')
        ws.close()
    else:
        choices = data["payload"]["choices"]
        status = choices["status"]
        content = choices["text"][0]["content"]
        answer += content  # collect the chunk so the caller can read the full reply
        print(content, end='')
        if status == 2:  # status 2 marks the final chunk, so close the session
            ws.close()


def gen_params(appid, messages, domain):
    # Build the request body; "text" carries the full list of {role, content} messages
    data = {
        "header": {
            "app_id": appid,
            "uid": "1234",
        },
        "parameter": {
            "chat": {
                "domain": domain,
                "temperature": 0.5,
                "max_tokens": 4096,
                "auditing": "default",
            }
        },
        "payload": {
            "message": {
                "text": messages
            }
        }
    }
    return data


def main(appid, api_secret, api_key, gpt_url, domain, messages):
    wsParam = Ws_Param(appid, api_key, api_secret, gpt_url)
    websocket.enableTrace(False)
    wsUrl = wsParam.create_url()

    ws = websocket.WebSocketApp(wsUrl, on_message=on_message, on_error=on_error, on_close=on_close, on_open=on_open)
    ws.appid = appid
    ws.messages = messages  # pass the full conversation history, not just the latest question
    ws.domain = domain
    ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})


# Part 2: chat loop, saved in a separate file (e.g. chat.py) next to sparkAPI.py
import sparkAPI as LLM_API

messages = []


# Append a message to the history; it can be either the user's question or the model's answer
def add_message(role, content):
    json_con = {
        "role": role,
        "content": content
    }

    messages.append(json_con)


# Approximate the prompt size by total character count (a rough proxy for the
# token limit on what can be sent to the model)
def get_messages_length(messages):
    length = sum(len(message["content"]) for message in messages)
    return length


# Drop the oldest messages until the history fits within the limit
def check_messages_length(messages):
    while get_messages_length(messages) > 8000:
        messages.pop(0)
    return messages


if __name__ == "__main__":
    # Fill in your own credentials and the websocket URL of the model version you use
    appid = "1178223e"
    api_secret = "xxx"
    api_key = "xxx"
    gpt_url = "xxx"
    domain = "generalv3.5"

    messages.clear()

    while True:  # chat loop
        user_query = input("\nUser: ")

        # Add the user's question to the history
        add_message("user", user_query)

        # Trim the history if needed, then send it to the model
        checked_messages = check_messages_length(messages)
        LLM_API.answer = ""  # reset the accumulated answer before each call
        print("\nAssistant:", end="")
        LLM_API.main(appid, api_secret, api_key, gpt_url, domain, checked_messages)

        # Add the model's reply to the history
        add_message("assistant", LLM_API.answer)

Sample run output: (screenshot from the original post omitted here)

Alibaba Cloud Tongyi Qianwen (Qwen) model:

Open the DashScope model service page on aliyun.com, activate the service, then go to the console to view your API key.
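The SDK can be installed with pip install dashscope. As a minimal sketch (the environment-variable name DASHSCOPE_API_KEY is just the name used in this example), the key can also be loaded at runtime instead of being written into the source:

# Optional sketch: read the DashScope API key from an environment variable
# rather than hard-coding it in the script.
import os
import dashscope

key = os.environ.get("DASHSCOPE_API_KEY")
if not key:
    raise RuntimeError("Please export DASHSCOPE_API_KEY first.")
dashscope.api_key = key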

Multi-turn dialogue code:

Replace the API_KEY with your own and the script can be run as-is.

from http import HTTPStatus
import dashscope
from dashscope import Generation
from dashscope.api_entities.dashscope_response import Role

dashscope.api_key = 'xxx'  # set your API key

def conversation_with_messages():
    messages = [{'role': Role.SYSTEM, 'content': 'You are a helpful assistant.'}]
    # Loop to implement a multi-turn conversation
    while True:
        prompt = input("USER:")
        # Append the user's question for this turn
        messages.append({'role': Role.USER, 'content': prompt})
        response = Generation.call(
            Generation.Models.qwen_turbo,  # choose the model to use
            messages=messages,
            result_format='message',  # set the result to be "message" format
        )
        if response.status_code == HTTPStatus.OK:
            for choice in response.output.choices:
                print(f"{choice['message']['role']}: {choice['message']['content']}")
                # Append the model's output to the history
                messages.append({'role': choice['message']['role'],
                                 'content': choice['message']['content']})
        else:
            print('Request id: %s, Status code: %s, error code: %s, error message: %s' % (
                response.request_id, response.status_code,
                response.code, response.message
            ))
            exit()

if __name__ == '__main__':
    conversation_with_messages()

Sample run output: (screenshot from the original post omitted here)
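Before wiring up the full loop, a quick single-turn call is a convenient way to confirm that the key and model are reachable. The sketch below reuses the same qwen_turbo model and "message" result format as the script above; replace 'xxx' with your own key.

# Minimal single-turn sanity check for the DashScope setup.
from http import HTTPStatus

import dashscope
from dashscope import Generation
from dashscope.api_entities.dashscope_response import Role

dashscope.api_key = 'xxx'  # or load it from an environment variable as sketched earlier

response = Generation.call(
    Generation.Models.qwen_turbo,
    messages=[{'role': Role.USER, 'content': 'Say hello in one sentence.'}],
    result_format='message',
)

if response.status_code == HTTPStatus.OK:
    print(response.output.choices[0]['message']['content'])
else:
    print('Request failed:', response.code, response.message)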

The above is only a simple, direct way of calling these two families of large models; for a deeper dive, please refer to the more detailed articles by other authors. Good luck, and thanks for reading!
