使用LlamaIndex和Cohere进行定制排序器的构建指南

在本教程中,我们将介绍如何使用LlamaIndex抽象和Cohere进行定制排序器的构建。完成后,您将能够创建一个定制排序器并利用它进行增强的数据检索。

环境设置

首先,我们需要安装必要的软件包。

%pip install llama-index-postprocessor-cohere-rerank
%pip install llama-index-llms-openai
%pip install llama-index-finetuning
%pip install llama-index-embeddings-cohere

!pip install llama-index cohere pypdf

初始化API密钥。

# API credentials — replace the placeholders with real keys before running.
openai_api_key = "YOUR OPENAI API KEY"
cohere_api_key = "YOUR COHEREAI API KEY"

import os

# Export the keys so the OpenAI / Cohere client libraries pick them up
# from the environment.
for env_name, secret in (
    ("OPENAI_API_KEY", openai_api_key),
    ("COHERE_API_KEY", cohere_api_key),
):
    os.environ[env_name] = secret

导入所需模块。

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.cohere import CohereEmbedding
from llama_index.core.retrievers import BaseRetriever, VectorIndexRetriever
from llama_index.core import QueryBundle
from llama_index.core.indices.query.schema import QueryType
from llama_index.core.schema import NodeWithScore
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.evaluation import EmbeddingQAFinetuneDataset
from llama_index.finetuning import generate_cohere_reranker_finetuning_dataset
from llama_index.core.evaluation import generate_question_context_pairs, RetrieverEvaluator
from llama_index.finetuning import CohereRerankerFinetuneEngine

from typing import List
import pandas as pd
import nest_asyncio

nest_asyncio.apply()

下载和加载数据

我们将使用Lyft和Uber 2021年的10K SEC文件作为训练和评估数据。

!mkdir -p 'data/10k/'
!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'
!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'

加载数据。

# Parse the two 10-K PDF filings into LlamaIndex Document objects.
lyft_docs, uber_docs = (
    SimpleDirectoryReader(input_files=[pdf_path]).load_data()
    for pdf_path in ("./data/10k/lyft_2021.pdf", "./data/10k/uber_2021.pdf")
)

数据处理与节点创建

将数据分块,每块限制为400个tokens。

# Split each filing into ~400-token chunks; these nodes are the retrieval
# units used for question generation and evaluation below.
node_parser = SimpleNodeParser.from_defaults(chunk_size=400)
lyft_nodes, uber_nodes = (
    node_parser.get_nodes_from_documents(docs) for docs in (lyft_docs, uber_docs)
)

使用GPT-4生成问题

# GPT-4 with temperature 0 for (near-)deterministic question generation.
llm = OpenAI(temperature=0, model="gpt-4")

# Prompt template consumed by generate_question_context_pairs below:
# {context_str} receives a node's text and {num_questions_per_chunk} the
# number of questions to produce for that chunk.
# NOTE(review): the odd "knowledge. generate" punctuation mirrors the
# upstream default prompt; the string is sent to the LLM verbatim, so it is
# deliberately left unchanged here.
qa_generate_prompt_tmpl = """\
Context information is below.

---------------------
{context_str}
---------------------

Given the context information and not prior knowledge.
generate only questions based on the below query.

You are a Professor. Your task is to setup \
{num_questions_per_chunk} questions for an upcoming \
quiz/examination. The questions should be diverse in nature \
across the document. The questions should not contain options, not start with Q1/ Q2. \
Restrict the questions to the context information provided.\
"""

# Build (question, context) pairs with GPT-4 for reranker fine-tuning.
# Lyft nodes 0-255 form the training split, the next 64 nodes the
# validation split; Uber nodes are held out entirely for evaluation.

qa_dataset_lyft_train = generate_question_context_pairs(
    lyft_nodes[:256],
    llm=llm,
    num_questions_per_chunk=1,
    qa_generate_prompt_tmpl=qa_generate_prompt_tmpl,
)

qa_dataset_lyft_train.save_json("lyft_train_dataset.json")

# Fix: the validation slice previously started at index 257, silently
# dropping node 256 (training ends at index 255). Start at 256 so the two
# splits are contiguous and no node is lost; the split stays 64 nodes wide.
qa_dataset_lyft_val = generate_question_context_pairs(
    lyft_nodes[256:320],
    llm=llm,
    num_questions_per_chunk=1,
    qa_generate_prompt_tmpl=qa_generate_prompt_tmpl,
)

qa_dataset_lyft_val.save_json("lyft_val_dataset.json")

# Uber validation set: first 150 nodes, used only for final evaluation.
qa_dataset_uber_val = generate_question_context_pairs(
    uber_nodes[:150],
    llm=llm,
    num_questions_per_chunk=1,
    qa_generate_prompt_tmpl=qa_generate_prompt_tmpl,
)

qa_dataset_uber_val.save_json("uber_val_dataset.json")

生成定制排序器训练数据集

# Cohere embedding model used to mine hard negatives (documents are
# embedded with the "search_document" input type).
embed_model = CohereEmbedding(
    cohere_api_key=cohere_api_key,
    model_name="embed-english-v3.0",
    input_type="search_document",
)

# Extra kwargs for the two hard-negative mining strategies.
_mining_kwargs = {
    "random": {
        "num_negatives": 5,
        "hard_negatives_gen_method": "random",
        "embed_model": embed_model,
    },
    "cosine_similarity": {
        "num_negatives": 5,
        "hard_negatives_gen_method": "cosine_similarity",
        "embed_model": embed_model,
    },
}

# One (train, val) jsonl pair per strategy: no hard negatives, 5 random
# negatives, and 5 cosine-similarity ("hard") negatives.
_jobs = [
    (qa_dataset_lyft_train, "train.jsonl", {}),
    (qa_dataset_lyft_val, "val.jsonl", {}),
    (qa_dataset_lyft_train, "train_5_random.jsonl", _mining_kwargs["random"]),
    (qa_dataset_lyft_val, "val_5_random.jsonl", _mining_kwargs["random"]),
    (
        qa_dataset_lyft_train,
        "train_5_cosine_similarity.jsonl",
        _mining_kwargs["cosine_similarity"],
    ),
    (
        qa_dataset_lyft_val,
        "val_5_cosine_similarity.jsonl",
        _mining_kwargs["cosine_similarity"],
    ),
]

for dataset, out_file, extra_kwargs in _jobs:
    generate_cohere_reranker_finetuning_dataset(
        dataset, finetune_dataset_file_name=out_file, **extra_kwargs
    )

训练定制排序器

def _finetune_reranker(train_file, val_file, model_name):
    """Create a Cohere rerank fine-tune engine, start the job, return the engine."""
    engine = CohereRerankerFinetuneEngine(
        train_file_name=train_file,
        val_file_name=val_file,
        model_name=model_name,
        model_type="RERANK",
        base_model="english",
    )
    engine.finetune()  # launches the hosted Cohere fine-tuning job
    return engine


# One fine-tuned model per hard-negative strategy, fine-tuned in sequence.
finetune_model_no_hard_negatives = _finetune_reranker(
    "train.jsonl", "val.jsonl", "lyft_reranker_0_hard_negatives"
)
finetune_model_random_hard_negatives = _finetune_reranker(
    "train_5_random.jsonl", "val_5_random.jsonl", "lyft_reranker_5_random_hard_negatives"
)
finetune_model_cosine_hard_negatives = _finetune_reranker(
    "train_5_cosine_similarity.jsonl",
    "val_5_cosine_similarity.jsonl",
    "lyft_reranker_5_cosine_hard_negatives",
)

测试定制排序器

# Instantiate the rerankers under test: the stock Cohere reranker plus the
# three fine-tuned variants, each returning its top-5 passages.
reranker_base = CohereRerank(top_n=5)
reranker_model_0 = finetune_model_no_hard_negatives.get_finetuned_model(top_n=5)
reranker_model_5_random = finetune_model_random_hard_negatives.get_finetuned_model(top_n=5)
reranker_model_5_cosine = finetune_model_cosine_hard_negatives.get_finetuned_model(top_n=5)

# Name -> reranker mapping that drives the evaluation loop.
# Fix: use None (not the string "None") as the no-reranker sentinel, so the
# placeholder cannot be mistaken for a real postprocessor and
# `reranker is None` checks behave as expected.
RERANKERS = {
    "WithoutReranker": None,
    "CohereRerank": reranker_base,
    "CohereRerank_0": reranker_model_0,
    "CohereRerank_5_random": reranker_model_5_random,
    "CohereRerank_5_cosine": reranker_model_5_cosine,
}

def display_results(embedding_name, reranker_name, eval_results):
    """Aggregate per-query retrieval metrics into a one-row summary frame.

    Averages the ``hit_rate`` and ``mrr`` values found in each result's
    ``metric_vals_dict`` and returns a DataFrame with a single row labelled
    by the embedding and reranker names.
    """
    scores = pd.DataFrame([res.metric_vals_dict for res in eval_results])
    summary = {
        "Embedding": [embedding_name],
        "Reranker": [reranker_name],
        "hit_rate": [scores["hit_rate"].mean()],
        "mrr": [scores["mrr"].mean()],
    }
    return pd.DataFrame(summary)

# Embedding model used when indexing documents (Cohere distinguishes
# document vs. query input types for its v3 embeddings).
index_embed_model = CohereEmbedding(
    cohere_api_key=cohere_api_key,
    model_name="embed-english-v3.0",
    input_type="search_document",
)

# Embedding model used at query time.
query_embed_model = CohereEmbedding(
    cohere_api_key=cohere_api_key,
    model_name="embed-english-v3.0",
    input_type="query",
)

# Build the vector index over the Lyft 10-K documents.
# NOTE(review): `index_embed_model`/`query_embed_model` are not documented
# parameters of VectorStoreIndex.from_documents (the documented one is
# `embed_model`), and `chunk_size`/`max_seq_len`/`batch_size` likewise look
# non-standard here — these kwargs may be silently ignored. Verify against
# the installed llama-index version.
index = VectorStoreIndex.from_documents(
    lyft_docs,
    index_embed_model=index_embed_model,
    query_embed_model=query_embed_model,
    chunk_size=400,
    max_seq_len=512,
    batch_size=4,
)

# Evaluate retrieval quality on the Lyft validation set for the baseline
# retriever and each reranker-augmented retriever, printing one summary
# row per configuration.
for reranker_name, reranker in RERANKERS.items():
    postprocessor_kwargs = (
        {}
        if reranker_name == "WithoutReranker"
        else {"retriever_postprocessor": reranker}
    )
    retriever = VectorIndexRetriever(index, **postprocessor_kwargs)
    evaluator = RetrieverEvaluator(retriever=retriever)
    eval_results = evaluator.evaluate(qa_dataset_lyft_val, metric="hit_rate", top_k=5)
    print(display_results("Cohere", reranker_name, eval_results))

在本教程中,我们逐步介绍了如何使用LlamaIndex抽象和Cohere进行定制排序器的构建。通过提供的代码示例,您可以轻松创建一个定制排序器,并利用它进行数据的增强检索。希望本教程能帮助您在相关领域进行进一步的研究和应用。

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值