【langchain】自用:自定义LLM和ChatModel

BaseLLM

FastAPI部署LLM接口,然后调用接口

class CustomLLM(LLM):
    """Custom LangChain LLM that forwards a prompt to a locally deployed
    FastAPI ``/chat/completions`` endpoint and returns the reply text.

    The backend is assumed to accept an OpenAI-style JSON body
    ``{"messages": [...], ...generation params...}`` and to answer with a
    JSON object containing a top-level ``"content"`` string.
    """

    # Generation defaults sent with every request.
    temperature: float = 0.1
    num_return_sequences: int = 1
    max_new_tokens: int = 1024
    # Endpoint configuration — fields (rather than hard-coded constants)
    # so callers can point at another deployment without subclassing.
    url: str = 'http://127.0.0.1:12612/chat/completions'
    timeout: float = 60.0
    headers: dict = {'Content-Type': 'application/json'}

    @property
    def _llm_type(self) -> str:
        # Identifier LangChain uses for serialization/telemetry.
        return "custom"

    def _call(self, prompt: str,
              stop: Optional[List[str]] = None,
              run_manager: Optional[CallbackManagerForLLMRun] = None,
              **kwargs: Any
    ) -> str:
        """Send ``prompt`` as a single user message and return the reply.

        Args:
            prompt: user text to complete.
            stop: not supported by the backend; passing it raises.
            run_manager: LangChain callback manager (unused here).
            **kwargs: per-call overrides of the generation defaults.

        Raises:
            ValueError: if ``stop`` is supplied.
            requests.HTTPError: if the service returns an error status.
        """
        if stop is not None:
            raise ValueError("stop kwargs are not permitted.")
        message_user = dict(role="user", content=prompt)
        # Per-call kwargs win over the model-level defaults.
        data = {'messages': [message_user], **self._identifying_params}
        data |= kwargs
        json_str_data = json.dumps(data, ensure_ascii=False)
        # timeout= prevents an unresponsive backend from hanging forever.
        response = requests.post(url=self.url, headers=self.headers,
                                 data=json_str_data, timeout=self.timeout)
        # Fail loudly on HTTP errors instead of a confusing KeyError below.
        response.raise_for_status()
        return response.json()["content"]

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        # Parameters identifying this configuration; also used as the
        # generation defaults shipped with each request.
        return {
            'temperature': self.temperature,
            'num_return_sequences': self.num_return_sequences,
            'max_new_tokens': self.max_new_tokens
        }

ChatModel

官网
官网:自定义教程

from langchain_core.language_models import BaseChatModel
from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    HumanMessage,
    SystemMessage
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
class CustomChatModel(BaseChatModel):
    """Custom LangChain chat model backed by a locally deployed FastAPI
    ``/chat/completions`` endpoint.

    LangChain messages are converted to OpenAI-style role dicts
    (``system`` / ``assistant`` / ``user``) before being posted; the
    backend is expected to answer with a JSON object containing a
    top-level ``"content"`` string.
    """

    # Generation defaults sent with every request.
    temperature: float = 0.1
    num_return_sequences: int = 1
    max_new_tokens: int = 1024
    # Endpoint configuration — fields so callers can retarget the service.
    url: str = 'http://127.0.0.1:12612/chat/completions'
    timeout: float = 60.0
    headers: dict = {'Content-Type': 'application/json'}

    @property
    def _llm_type(self) -> str:
        # Identifier LangChain uses for serialization/telemetry.
        return "custom"

    def _generate(self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any) -> ChatResult:
        """Post the conversation to the backend and wrap its reply.

        Args:
            messages: conversation history as LangChain messages.
            stop: stop sequences (ignored by this backend).
            run_manager: LangChain callback manager (unused here).
            **kwargs: per-call overrides of the generation defaults.

        Raises:
            requests.HTTPError: if the service returns an error status.
        """

        def langchain2custom(lc_message: BaseMessage) -> dict:
            # isinstance (not `type(...) ==`) so subclasses map correctly.
            if isinstance(lc_message, SystemMessage):
                return dict(role='system', content=lc_message.content)
            # Assistant turns must keep their role; labeling them 'user'
            # would make the backend read the whole history as user text.
            if isinstance(lc_message, AIMessage):
                return dict(role='assistant', content=lc_message.content)
            return dict(role='user', content=lc_message.content)

        custom_messages = [langchain2custom(mes) for mes in messages]

        # Per-call kwargs win over the model-level defaults.
        data = {'messages': custom_messages, **self._identifying_params}
        data |= kwargs
        json_str_data = json.dumps(data, ensure_ascii=False)
        # timeout= prevents an unresponsive backend from hanging forever.
        response = requests.post(url=self.url, headers=self.headers,
                                 data=json_str_data, timeout=self.timeout)
        # Fail loudly on HTTP errors instead of a confusing KeyError below.
        response.raise_for_status()
        content = response.json()["content"]

        out_message = AIMessage(content=content)
        generation = ChatGeneration(message=out_message)
        return ChatResult(generations=[generation])

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        # Parameters identifying this configuration; also used as the
        # generation defaults shipped with each request.
        return {
            'temperature': self.temperature,
            'num_return_sequences': self.num_return_sequences,
            'max_new_tokens': self.max_new_tokens
        }

bind tools

可以参考from langchain_openai.chat_models.base import BaseChatOpenAI中的bind_tools函数
参考
参考2

CustomModel工具调用

需要自定义

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值