RAG
RAG (Retrieval-Augmented Generation) is an AI technique that combines information retrieval with language-model generation.
Large language models such as ChatGPT, Qwen2, and Llama3 are pre-trained on large general-purpose corpora, so they cover domain-specific knowledge only sparsely. RAG combines domain knowledge with the capabilities of an LLM, which usually gives better results on such questions.
This experiment uses Qwen2-1.5B because the available GPU has only 8 GB of memory.
A RAG pipeline is built from the following parts:
1. Knowledge-base data (e.g. domain papers, books, and notes): in my experience, converting PDFs to Markdown first gives better results, for example with marker.
2. Vector embedding of the data (Embedding): common embedding models can be found on the MTEB leaderboard, which lists a large number of Chinese and English embedding models: https://huggingface.co/spaces/mteb/leaderboard
3. Building the vector database: it stores the embedding vectors of the data and is searched at query time. For background, see the CSDN post 一文了解向量数据库在RAG中扮演的角色.
4. User query and retrieval: the user asks a question in the target domain. The query is first embedded with the embedding model and then matched against the vector database; candidates are sorted by similarity from high to low and the most similar ones are returned.
5. Answer generation: the model combines the retrieved passages, the prompting method, and its own capabilities to produce the final answer. (A minimal end-to-end sketch of this flow follows the list.)
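The following minimal sketch illustrates steps 2 through 5 end to end. The toy documents, the query, and the model name (BAAI/bge-base-zh-v1.5) are illustrative assumptions only; the actual models and data used in this experiment are loaded in the code sections below.

import numpy as np
from sentence_transformers import SentenceTransformer

# Toy knowledge base standing in for the domain documents (step 1).
docs = ["机油建议每5000至10000公里更换一次。", "轮胎气压一般保持在2.3至2.5bar左右。"]

# Embed the documents; this array of vectors plays the role of the vector database (steps 2-3).
embed_model = SentenceTransformer("BAAI/bge-base-zh-v1.5")  # assumed model name
doc_vecs = embed_model.encode(docs, normalize_embeddings=True)

# Embed the user query and retrieve the most similar chunk (step 4).
# With normalized vectors, the dot product equals cosine similarity.
query = "机油多久更换一次?"
query_vec = embed_model.encode([query], normalize_embeddings=True)[0]
best = int(np.argmax(doc_vecs @ query_vec))

# Stuff the retrieved chunk into the prompt; an LLM would then generate the answer (step 5).
prompt = f"结合给定的资料回答问题。资料:{docs[best]} 问题:{query}"
print(prompt)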
Experiment code:
1. Load packages:
import jieba, json, pdfplumber
# BM25 estimates how relevant a document is to a query and is widely used in search engines. It combines
# term frequency (TF, how often a word occurs in a document), inverse document frequency (IDF, a measure of
# how informative a word is across the whole collection) and document length to produce a relevance score
# for each document.
from rank_bm25 import BM25Okapi
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM
from sentence_transformers import SentenceTransformer
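As a quick, self-contained illustration of the BM25 step (the corpus and query below are made up for this example and are not part of the experiment data):

# Toy corpus, tokenized with jieba; BM25Okapi then scores each document against the query.
corpus = ["发动机机油的更换周期", "轮胎气压的标准数值", "空调滤芯的更换方法"]
bm25_demo = BM25Okapi([jieba.lcut(doc) for doc in corpus])
print(bm25_demo.get_scores(jieba.lcut("机油多久更换一次")))  # one relevance score per document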
2. Data processing:
import jieba, json, pdfplumber
# Split long text into fixed-size chunks, with a small overlap between neighbouring chunks
def split_text_fixed_size(text, chunk_size, overlap_size):
    new_text = []
    for i in range(0, len(text), chunk_size):
        if i == 0:
            new_text.append(text[0:chunk_size])
        else:
            new_text.append(text[i - overlap_size:i + chunk_size])
    return new_text
def read_data(query_data_path, knowledge_data_path):
    with open(query_data_path, 'r', encoding='utf-8') as f:
        questions = json.load(f)
    pdf = pdfplumber.open(knowledge_data_path)
    # Record which page each text chunk comes from
    pdf_content = []
    for page_idx in range(len(pdf.pages)):
        text = pdf.pages[page_idx].extract_text()
        new_text = split_text_fixed_size(text, chunk_size=100, overlap_size=5)
        for chunk_text in new_text:
            pdf_content.append({
                'page': 'page_' + str(page_idx + 1),
                'content': chunk_text
            })
    return questions, pdf_content
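A quick sanity check of the chunking behaviour (the input string is made up): every chunk after the first starts overlap_size characters before the previous boundary, so neighbouring chunks share a little context. Note that questions.json is expected to be a JSON list of objects that each contain at least a "question" field, since that is what the main loop reads.

print(split_text_fixed_size("ABCDEFGHIJKLMNOPQRSTUVWXYZ", chunk_size=10, overlap_size=3))
# ['ABCDEFGHIJ', 'HIJKLMNOPQRST', 'RSTUVWXYZ']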
3. Load the rerank model (the sentence-embedding model used for semantic retrieval is loaded in step 7):
device = "cuda"
#bge-base-zh-v1.5由智源研究院(BAAI)开源的中文Embedding模型。
embeding_path=r"C:\Users\Administrator\Desktop\llm-rag\embedding_model\bge-base-zh-v1.5"
rerank_tokenizer = AutoTokenizer.from_pretrained(embeding_path)
rerank_model = AutoModelForSequenceClassification.from_pretrained(embeding_path)
rerank_model.cuda()
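A minimal sketch of how the loaded model scores a single (question, passage) pair; the two strings are made up, and the pattern assumes a cross-encoder reranker that outputs one relevance logit per pair. get_rank_index below applies exactly this pattern to a batch of candidates.

pair = [["机油多久更换一次?", "机油建议每5000至10000公里更换一次。"]]
demo_inputs = rerank_tokenizer(pair, padding=True, truncation=True, return_tensors='pt', max_length=512)
demo_inputs = {k: v.cuda() for k, v in demo_inputs.items()}
with torch.no_grad():
    demo_score = rerank_model(**demo_inputs, return_dict=True).logits.view(-1).float()
print(demo_score)  # higher = more relevant, for a proper cross-encoder reranker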
4. Load Qwen2-1.5B (the model can be downloaded from ModelScope / 魔搭社区):
path = r'C:\Users\Administrator\Desktop\lllm-fine_turning\Qwen2-1.5B-Instruct'
model = AutoModelForCausalLM.from_pretrained(
path,
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(path)
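A short smoke test that the model and tokenizer work together, using the standard chat-template plus generate pattern (the prompt is made up and the generation settings are kept minimal):

messages = [{"role": "user", "content": "用一句话介绍一下RAG。"}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer([text], return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0][inputs.input_ids.shape[1]:], skip_special_tokens=True))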
5. Compute the highest rerank score between the query and the retrieved candidate chunks (text splitting reuses split_text_fixed_size from step 2):
# Find the candidate chunk with the highest rerank score and return that score together with the chunk index
def get_rank_index(max_score_page_idxs_, questions_, pdf_content_):
    # Build (question, candidate chunk) pairs for the rerank model.
    # query_idx is the loop variable of the main loop in step 7 (visible here as a module-level name).
    pairs = []
    for idx in max_score_page_idxs_:
        pairs.append([questions_[query_idx]["question"], pdf_content_[idx]['content']])
    inputs = rerank_tokenizer(pairs, padding=True, truncation=True, return_tensors='pt', max_length=512)
    with torch.no_grad():
        inputs = {key: inputs[key].cuda() for key in inputs.keys()}
        scores = rerank_model(**inputs, return_dict=True).logits.view(-1, ).float()
    # Return the best score itself (not just its position), so that the BM25 route and the
    # semantic route can be compared against each other in the main loop.
    scores = scores.cpu().numpy()
    best = scores.argmax()
    return scores[best], max_score_page_idxs_[best]
6. Prompt construction for Qwen2 (text reading reuses read_data from step 2):
def qwen_preprocess(tokenizer_, ziliao, question):
    # tokenizer.apply_chat_template() is meant to be used together with model.generate().
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": f"帮我结合给定的资料,回答问题。如果问题答案无法从资料中获得,"
                                     f"输出结合给定的资料,无法回答问题. 如果找到答案, 就输出找到的答案, 资料:{ziliao}, 问题:{question}"},
    ]
    # add_generation_prompt=True appends the generation prompt, i.e. the trailing <|im_start|>assistant\n
    text = tokenizer_.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    model_inputs_ = tokenizer_([text], return_tensors="pt").to(device)
    # A single, unpadded sequence, so the attention mask is simply all ones with the same shape as input_ids.
    attention_mask_ = torch.ones(model_inputs_.input_ids.shape, dtype=torch.long, device=device)
    return model_inputs_, attention_mask_
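A hypothetical call to qwen_preprocess, just to show how it plugs into model.generate (the passage and question are invented; the real loop in step 7 feeds in the retrieved chunk instead):

demo_inputs, demo_mask = qwen_preprocess(tokenizer, "机油建议每5000至10000公里更换一次。", "机油多久更换一次?")
demo_out = model.generate(demo_inputs.input_ids, attention_mask=demo_mask,
                          max_new_tokens=64, pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(demo_out[0][demo_inputs.input_ids.shape[1]:], skip_special_tokens=True))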
7. Main function that calls the pieces above:
if __name__ == '__main__':
    questions, pdf_content = read_data(query_data_path=r"C:\Users\Administrator\Desktop\llm-rag\datasets\汽车知识问答\questions.json",
                                       knowledge_data_path=r'C:\Users\Administrator\Desktop\llm-rag\datasets\汽车知识问答\初赛训练数据集.pdf')
    # Keyword-based retrieval index (BM25)
    pdf_content_words = [jieba.lcut(x['content']) for x in pdf_content]
    bm25 = BM25Okapi(pdf_content_words)
    # Semantic retrieval index (dense embeddings)
    sent_model = SentenceTransformer(
        r'C:\Users\Administrator\Desktop\llm-rag\embedding_model\stella-mrl-large-zh-v3.5-1792d'
    )
    question_sentences = [x['question'] for x in questions]
    pdf_content_sentences = [x['content'] for x in pdf_content]
    question_embeddings = sent_model.encode(question_sentences, normalize_embeddings=True)
    pdf_embeddings = sent_model.encode(pdf_content_sentences, normalize_embeddings=True)

    for query_idx in range(len(questions)):
        # BM25 retrieval: top 10 candidate chunks
        doc_scores = bm25.get_scores(jieba.lcut(questions[query_idx]["question"]))
        bm25_score_page_idxs = doc_scores.argsort()[-10:]
        # Semantic retrieval: top 10 candidate chunks
        score = question_embeddings[query_idx] @ pdf_embeddings.T
        ste_score_page_idxs = score.argsort()[-10:]
        # Rerank both candidate sets and keep whichever route yields the higher score
        bm25_score, bm25_index = get_rank_index(bm25_score_page_idxs, questions, pdf_content)
        ste_score, ste_index = get_rank_index(ste_score_page_idxs, questions, pdf_content)
        if ste_score >= bm25_score:
            max_score_page_idx = ste_index
        else:
            max_score_page_idx = bm25_index
        # The chunk index is not the page number (one page may yield several chunks),
        # so take the page label stored alongside the chunk.
        questions[query_idx]['reference'] = pdf_content[max_score_page_idx]['page']

        model_inputs, attention_mask = qwen_preprocess(
            tokenizer, pdf_content[max_score_page_idx]['content'], questions[query_idx]["question"]
        )
        generated_ids = model.generate(
            model_inputs.input_ids,
            max_new_tokens=128,  # maximum number of new tokens to generate
            attention_mask=attention_mask,
            pad_token_id=tokenizer.eos_token_id
        )
        # Strip the prompt tokens so only the newly generated answer remains
        generated_ids = [
            output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
        ]
        response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        print(f'question: {questions[query_idx]["question"]}, answer: {response}')
Most of the code above is adapted from the CSDN post 使用Qwen2进行RAG代码实践.