# Requires:
#   pip install langchain langchain-openai langchain-community docarray tiktoken
import os
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())  # load the local .env file, which defines the Azure OpenAI credentials
api_key = os.environ['AZURE_OPENAI_API_KEY']
azure_endpoint = os.environ['AZURE_OPENAI_ENDPOINT']
api_version = os.environ['AZURE_OPENAI_API_VERSION']
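# A minimal .env for this script might look like the following (placeholder
# values only; substitute your own resource's key, endpoint, and API version):
#   AZURE_OPENAI_API_KEY=<your-key>
#   AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com/
#   AZURE_OPENAI_API_VERSION=2024-02-01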
model="gpt-35-turbo"
deployment_name="gpt-35-turbo"############from langchain_community.vectorstores import DocArrayInMemorySearch
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_openai.chat_models import AzureChatOpenAI
from langchain_openai.embeddings import AzureOpenAIEmbeddings
## Retrieve documents relevant to the question and include them as part of the prompt context.
vectorstore = DocArrayInMemorySearch.from_texts(
    ["harrison worked at kensho", "bears like to eat honey"],
    # Passing the credentials explicitly keeps the embeddings client in sync with
    # the chat client below; this assumes an embeddings deployment named after the
    # default model, text-embedding-ada-002 (pass azure_deployment=... otherwise).
    embedding=AzureOpenAIEmbeddings(
        api_key=api_key, azure_endpoint=azure_endpoint, api_version=api_version
    ),
)
retriever = vectorstore.as_retriever()
# To test retrieval on its own, without the rest of the chain:
# retriever.invoke("where did harrison work?")
template ="""Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
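# The prompt can be inspected in isolation; a sketch of the kind of input the
# chain will feed it (in the real chain, "context" is a list of Documents):
# prompt.invoke({"context": "harrison worked at kensho", "question": "where did harrison work?"})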
model = AzureChatOpenAI(
    model=model_name,
    azure_deployment=deployment_name,
    api_key=api_key,
    azure_endpoint=azure_endpoint,
    api_version=api_version,
)
output_parser = StrOutputParser()

## Build a RunnableParallel with two entries: "context" holds the documents
## fetched by the retriever, and "question" carries the user's original
## question, copied through unchanged by RunnablePassthrough.
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
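# To see what this step produces on its own (a sketch; the exact Document
# fields may differ by version):
# setup_and_retrieval.invoke("where did harrison work?")
# -> {"context": [Document(page_content="harrison worked at kensho"), ...],
#     "question": "where did harrison work?"}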
chain = setup_and_retrieval | prompt | model | output_parser
print(chain.invoke("where did harrison work?"))
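# The composed chain also supports the standard Runnable streaming interface;
# a minimal sketch (chunk sizes depend on the model):
for chunk in chain.stream("where did harrison work?"):
    print(chunk, end="", flush=True)
print()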