# Adding content moderation (review) to an LLM application using LangChain
from langchain.chains import OpenAIModerationChain
from langchain_core.prompts import ChatPromptTemplate
from config import llm
# Moderation step backed by OpenAI's moderation endpoint
# (requires an OpenAI API key in the environment — TODO confirm config).
moderate = OpenAIModerationChain()
# Prompt that just asks the model to echo the user's input back.
prompt = ChatPromptTemplate.from_messages([("system", "repeat after me: {input}")])
# Without moderation: the model repeats the offensive text verbatim.
chain = prompt | llm
chain.invoke({"input": "you are stupid"})
# '\n\nYou are stupid.'
# With moderation: the model's output is piped into the moderation chain,
# which flags text that violates OpenAI's content policy instead of returning it raw.
moderated_chain = chain | moderate
moderated_chain.invoke({"input": "you are stupid"})
# {'input': '\n\nYou are stupid', 'output': "Text was found that violates OpenAI's content policy."}