from langchain_core.prompts import ChatPromptTemplate
from langchain_ollama.llms import OllamaLLM

# Demo 01: minimal prompt -> local Ollama model pipeline.
# The template constrains the model to step-by-step reasoning for any question.
template = """Question: {question}
Answer: Let's think step by step."""

prompt = ChatPromptTemplate.from_template(template)
model = OllamaLLM(model="llama3")  # requires a local Ollama server with llama3 pulled
chain = prompt | model

# Bug fix: the original discarded the invoke() result; print it so the demo
# actually shows the model's answer.
print(chain.invoke({"question": "What is LangChain?"}))
# 02 demo实验 (interactive Q&A demo)
from langchain_core.prompts import ChatPromptTemplate
from langchain_ollama.llms import OllamaLLM

# Demo 02: interactive REPL-style Q&A loop against a local GLM4 model.
# template 起到一个规定回答问题的格式模板的作用
# (the template fixes the answer format: always reply in Chinese)
template = """Question: {question}
Answer: 请用中文回答我的问题."""

prompt = ChatPromptTemplate.from_template(template)
model = OllamaLLM(model="glm4:9b")
chain = prompt | model

# Bug fix: the original had several statements fused onto single lines
# ("whileTrue:", "input(...)if ...", "invoke(...)print(...)"), which is a
# SyntaxError. Restored the intended loop: read a question, exit on the
# sentinel "结束", otherwise run the chain and print the answer.
while True:
    question = input("请输入您的问题:")
    if question == "结束":  # sentinel word: user asks to quit
        print("-" * 30 + "感谢您的使用" + "-" * 30)
        break
    answer = chain.invoke({"question": question})
    print(answer)
    print("-" * 30)  # visual separator between turns
# 03 读取txt回答问题官方示例 (official example: answer questions over a txt file)
# Demo 03: load a text file, chunk it, embed the chunks into a Chroma
# vector store, and run a similarity search (official LangChain example).
#
# Bug fix: the Chroma import was mangled into a comment ("# importfrom ..."),
# which made `Chroma` below a NameError; the SentenceTransformerEmbeddings
# import was fused with the next import statement (SyntaxError); and the
# final print() was swallowed into the "# print results" comment, so the
# example printed nothing. All three are restored.
from langchain_chroma import Chroma
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
    SentenceTransformerEmbeddings,
)
from langchain_text_splitters import CharacterTextSplitter

# load the document and split it into chunks
loader = TextLoader("../../how_to/state_of_the_union.txt")
documents = loader.load()

# split it into chunks (1000 chars per chunk, no overlap)
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)

# create the open-source embedding function
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")

# load it into Chroma (in-memory vector store)
db = Chroma.from_documents(docs, embedding_function)

# query it
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)

# print results
print(docs[0].page_content)