Using Ollama for Local LLM RAG Question Answering

The script below builds a complete local RAG pipeline with LlamaIndex: Ollama serves the gemma model as the LLM, a locally downloaded copy of BAAI/bge-small-zh-v1.5 provides embeddings, and the documents under the data folder are indexed and queried. It assumes an Ollama server is running locally with the gemma model already pulled.

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.llms.ollama import Ollama
from langchain_huggingface import HuggingFaceEmbeddings

# Configure the Ollama LLM; here we use gemma:7b
Settings.llm = Ollama(model="gemma", request_timeout=600.0)

# Configure the HuggingFaceEmbeddings embedding model; here we use
# BAAI/bge-small-zh-v1.5, loaded from a local path
path = "D:\\project\\ollama\\bge-small-zh-v1.5"
Settings.embed_model = HuggingFaceEmbeddings(model_name=path)

# Build an index over the documents in the data folder
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)

# Create a query engine and ask a simple question
query_engine = index.as_query_engine()
response = query_engine.query("今天星期几")  # "What day of the week is it today?"
print(response)
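One practical refinement: as written, the script re-reads and re-embeds every document on each run. LlamaIndex can persist the built index to disk and reload it later. The sketch below shows this pattern; it assumes the index variable from the script above and uses a hypothetical ./storage directory of my choosing.

import os
from llama_index.core import StorageContext, load_index_from_storage

PERSIST_DIR = "./storage"  # hypothetical persistence directory
if not os.path.exists(PERSIST_DIR):
    # First run: save the freshly built index so later runs can skip embedding
    index.storage_context.persist(persist_dir=PERSIST_DIR)
else:
    # Later runs: reload the stored index (still uses Settings.embed_model / Settings.llm)
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)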
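A note on the embedding setup: the script passes a LangChain embeddings object to Settings.embed_model, which LlamaIndex can wrap as long as the llama-index-embeddings-langchain integration is installed. A minimal alternative sketch, assuming you prefer LlamaIndex's own HuggingFaceEmbedding class (from the llama-index-embeddings-huggingface package) and keeping the same local model path:

from llama_index.core import Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Same local copy of BAAI/bge-small-zh-v1.5 as above
Settings.embed_model = HuggingFaceEmbedding(
    model_name="D:\\project\\ollama\\bge-small-zh-v1.5"
)

Either way, the rest of the pipeline (indexing and querying) is unchanged.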