Semantic Index System Recall Module: Dynamic-to-Static Model Conversion and Static-Graph Inference

The script below defines the SimCSE recall model, loads the fine-tuned parameters, exports the model to a static graph with paddle.jit.to_static / paddle.jit.save, and then runs the exported model through the Paddle Inference predictor to extract text embeddings and score semantic similarity.

import os
import numpy as np
import paddle
import paddle.nn.functional as F
from paddlenlp.transformers import AutoModel, AutoTokenizer
from paddlenlp.data import Stack, Tuple, Pad
import sys
from scipy.special import softmax
from scipy import spatial
from paddle import inference
from paddlenlp.datasets import load_dataset
from paddlenlp.utils.log import logger

import paddle.nn as nn

class SimCSE(nn.Layer):

    def __init__(self,
                 pretrained_model,
                 dropout=None,
                 margin=0.0,
                 scale=20,
                 output_emb_size=None):
        super().__init__()
        self.ptm = pretrained_model
        self.dropout = nn.Dropout(dropout if dropout is not None else 0.1)
        self.output_emb_size = output_emb_size
        if output_emb_size is not None and output_emb_size > 0:
            weight_attr = paddle.ParamAttr(
                initializer=paddle.nn.initializer.TruncatedNormal(std=0.02))
            self.emb_reduce_linear = paddle.nn.Linear(768,
                                                      output_emb_size,
                                                      weight_attr=weight_attr)

        self.margin = margin
        # Scale cosine similarity to ease training convergence
        self.scale = scale

    @paddle.jit.to_static(input_spec=[
        paddle.static.InputSpec(shape=[None, None], dtype='int64'),
        paddle.static.InputSpec(shape=[None, None], dtype='int64')
    ])
    def get_pooled_embedding(self,
                             input_ids,
                             token_type_ids=None,
                             position_ids=None,
                             attention_mask=None,
                             with_pooler=True):

        # Note: cls_embedding is the pooled embedding with tanh activation
        sequence_output, cls_embedding = self.ptm(input_ids, token_type_ids,
                                                  position_ids, attention_mask)

        if not with_pooler:
            cls_embedding = sequence_output[:, 0, :]

        if self.output_emb_size is not None and self.output_emb_size > 0:
            cls_embedding = self.emb_reduce_linear(cls_embedding)

        cls_embedding = self.dropout(cls_embedding)
        cls_embedding = F.normalize(cls_embedding, p=2, axis=-1)

        return cls_embedding

    def get_semantic_embedding(self, data_loader):
        self.eval()
        with paddle.no_grad():
            for batch_data in data_loader:
                input_ids, token_type_ids = batch_data
                input_ids = paddle.to_tensor(input_ids)
                token_type_ids = paddle.to_tensor(token_type_ids)

                text_embeddings = self.get_pooled_embedding(
                    input_ids, token_type_ids=token_type_ids)

                yield text_embeddings

    def cosine_sim(self,
                   query_input_ids,
                   title_input_ids,
                   query_token_type_ids=None,
                   query_position_ids=None,
                   query_attention_mask=None,
                   title_token_type_ids=None,
                   title_position_ids=None,
                   title_attention_mask=None,
                   with_pooler=True):

        query_cls_embedding = self.get_pooled_embedding(query_input_ids,
                                                        query_token_type_ids,
                                                        query_position_ids,
                                                        query_attention_mask,
                                                        with_pooler=with_pooler)

        title_cls_embedding = self.get_pooled_embedding(title_input_ids,
                                                        title_token_type_ids,
                                                        title_position_ids,
                                                        title_attention_mask,
                                                        with_pooler=with_pooler)

        cosine_sim = paddle.sum(query_cls_embedding * title_cls_embedding,
                                axis=-1)
        return cosine_sim

    def forward(self,
                query_input_ids,
                title_input_ids,
                query_token_type_ids=None,
                query_position_ids=None,
                query_attention_mask=None,
                title_token_type_ids=None,
                title_position_ids=None,
                title_attention_mask=None):

        query_cls_embedding = self.get_pooled_embedding(query_input_ids,
                                                        query_token_type_ids,
                                                        query_position_ids,
                                                        query_attention_mask)

        title_cls_embedding = self.get_pooled_embedding(title_input_ids,
                                                        title_token_type_ids,
                                                        title_position_ids,
                                                        title_attention_mask)

        cosine_sim = paddle.matmul(query_cls_embedding,
                                   title_cls_embedding,
                                   transpose_y=True)

        # Subtract margin from the cosine similarity of every positive pair (the diagonal)
        margin_diag = paddle.full(shape=[query_cls_embedding.shape[0]],
                                  fill_value=self.margin,
                                  dtype=paddle.get_default_dtype())

        cosine_sim = cosine_sim - paddle.diag(margin_diag)

        # Scale cosine similarity to ease training convergence
        cosine_sim *= self.scale

        labels = paddle.arange(0, query_cls_embedding.shape[0], dtype='int64')
        labels = paddle.reshape(labels, shape=[-1, 1])

        loss = F.cross_entropy(input=cosine_sim, label=labels)

        return loss
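
In forward, every query embedding is matched against every title embedding in the same batch: the diagonal of the resulting similarity matrix holds the positive pairs, the off-diagonal entries act as in-batch negatives, and the loss is plain cross-entropy with label i for row i. Below is a minimal, self-contained sketch of that margin/scale/label construction on an invented 3x3 similarity matrix (illustration only, not part of the training script):

# Toy illustration of the in-batch-negatives loss used in SimCSE.forward.
# The 3x3 matrix stands for cosine similarities of 3 queries vs. 3 titles;
# the values are made up purely for demonstration.
import paddle
import paddle.nn.functional as F

toy_sim = paddle.to_tensor([[0.9, 0.2, 0.1],
                            [0.3, 0.8, 0.2],
                            [0.1, 0.4, 0.7]], dtype='float32')
margin, scale = 0.0, 20
toy_sim = toy_sim - paddle.diag(paddle.full([3], margin, dtype='float32'))  # subtract margin on the positives
toy_sim = toy_sim * scale                                                   # scale to ease convergence
toy_labels = paddle.arange(0, 3, dtype='int64').reshape([-1, 1])            # row i's positive is column i
toy_loss = F.cross_entropy(input=toy_sim, label=toy_labels)
print(float(toy_loss))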

params_path='./checkpoint/bd_paddle_yysy/model_state.pdparams'
output_path='./output/yysy'
model_name_or_path='rocketqa-zh-base-query-encoder'

output_emb_size = 256
pretrained_model = AutoModel.from_pretrained(model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model = SimCSE(pretrained_model, output_emb_size=output_emb_size)

if params_path and os.path.isfile(params_path):
    state_dict = paddle.load(params_path)
    model.set_dict(state_dict)
    print("Loaded parameters from %s" % params_path)

model.eval()  # switch to evaluation mode
model = paddle.jit.to_static(  # dynamic-to-static conversion
    model,
    input_spec=[
        paddle.static.InputSpec(shape=[None, None],
                                dtype="int64"),  # input_ids
        paddle.static.InputSpec(shape=[None, None],
                                dtype="int64")  # segment_ids
    ])
save_path = os.path.join(output_path, "inference")
paddle.jit.save(model, save_path)
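
Because get_pooled_embedding is decorated with @paddle.jit.to_static, paddle.jit.save exports that method as its own static program; the files consumed by the predictor below are therefore named inference.get_pooled_embedding.pdmodel and inference.get_pooled_embedding.pdiparams. A quick sanity check (a sketch added here, not part of the original export step) that the expected artifacts were written:

# Verify that the exported static-graph files for get_pooled_embedding exist.
for suffix in ("pdmodel", "pdiparams"):
    exported = os.path.join(output_path, "inference.get_pooled_embedding." + suffix)
    print(exported, "exists:", os.path.exists(exported))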

sys.path.append('.')  # add the current directory to Python's module search path

model_dir='./output/yysy/'

max_seq_length=128
batch_size=15
device='gpu'
model_name_or_path='rocketqa-zh-base-query-encoder'
precision='fp32'
cpu_threads=8
benchmark=False
save_log_path='./log/yysy/recall_deploy'

def convert_example(example, tokenizer, max_seq_length=512, do_evaluate=False):
    result = []
    for key, text in example.items():
        encoded_inputs = tokenizer(text=text, max_seq_len=max_seq_length)
        input_ids = encoded_inputs["input_ids"]
        token_type_ids = encoded_inputs["token_type_ids"]
        result += [input_ids, token_type_ids]
    return result
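
convert_example tokenizes every value in the example dict and flattens the pieces into one list, so a single-entry dict yields exactly [input_ids, token_type_ids]. An illustrative call (the sample text reuses the query defined further below):

# Illustrative usage of convert_example; both lists have the same length.
sample = {0: '国有企业引入非国有资本对创新绩效的影响——基于制造业国有上市公司的经验证据'}
sample_input_ids, sample_token_type_ids = convert_example(sample, tokenizer, max_seq_length=max_seq_length)
print(len(sample_input_ids), len(sample_token_type_ids))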

model_file = model_dir + "inference.get_pooled_embedding.pdmodel"
params_file = model_dir + "inference.get_pooled_embedding.pdiparams"

if not os.path.exists(model_file):
    raise ValueError("model file not found at path {}".format(model_file))
if not os.path.exists(params_file):
    raise ValueError("params file not found at path {}".format(params_file))
config = paddle.inference.Config(model_file, params_file)  # configuration for the static-graph model

config.enable_use_gpu(100, 0)  # 100 MB initial GPU memory pool on device 0
precision_map = {
    "fp16": inference.PrecisionType.Half,
    "fp32": inference.PrecisionType.Float32,
    "int8": inference.PrecisionType.Int8
}
precision_mode = precision_map[precision]
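
In this trimmed-down script precision_mode and cpu_threads are computed but never consumed; in a full deployment they would typically feed the TensorRT or CPU branches of the inference Config. The sketch below shows one way they could be wired up. The calls are standard Paddle Inference APIs, but enabling them here is an assumption, not something the original script does:

# Optional wiring of precision_mode / cpu_threads (assumption, not used below).
if device == 'gpu' and precision == 'fp16':
    # requires a TensorRT-enabled Paddle build
    config.enable_tensorrt_engine(workspace_size=1 << 30,
                                  max_batch_size=batch_size,
                                  precision_mode=precision_mode)
elif device == 'cpu':
    config.disable_gpu()
    config.set_cpu_math_library_num_threads(cpu_threads)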

config.switch_use_feed_fetch_ops(False)  # disable feed/fetch ops so the input/output handles can be used directly
predictor = paddle.inference.create_predictor(config)  # create the inference predictor

print(predictor.get_input_names())  # ['input_ids', 'token_type_ids']

print(predictor.get_output_names())  # ['elementwise_div_2']

input_handles = [
    predictor.get_input_handle(name)
    for name in predictor.get_input_names()
]
output_handle = predictor.get_output_handle(
    predictor.get_output_names()[0])

id2corpus = {0: '国有企业引入非国有资本对创新绩效的影响——基于制造业国有上市公司的经验证据'}  # a simulated user query

corpus_list = [{idx: text} for idx, text in id2corpus.items()]

def extract_embedding(data, tokenizer):
    examples = []
    for text in data:
        input_ids, segment_ids = convert_example(text, tokenizer)
        # print(np.array(input_ids).shape,np.array(segment_ids).shape) # (39,)
        examples.append((input_ids, segment_ids))  # collect each sample as (input_ids, token_type_ids)
    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # input_ids
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # token_type_ids
    ): fn(samples)
    input_ids, segment_ids = batchify_fn(examples)  # pad each batch to the same sequence length
    input_handles[0].copy_from_cpu(input_ids)  # handle 0: input_ids
    input_handles[1].copy_from_cpu(segment_ids)  # handle 1: token_type_ids
    predictor.run()  # run inference
    logits = output_handle.copy_to_cpu()  # fetch the inference result
    return logits

res = extract_embedding(corpus_list, tokenizer)

print(res.shape)
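
The printed shape should be (1, 256): one row for the single input text and output_emb_size columns. Since get_pooled_embedding ends with F.normalize, each row is expected to have (approximately) unit L2 norm, which is easy to verify:

# The embeddings are L2-normalized inside the exported model, so row norms are ~1.
print(np.linalg.norm(res, axis=1))  # expected to be close to [1.0]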

# Predict the semantic similarity of text pairs
corpus_list = [['中西方语言与文化的差异', '中西方文化差异以及语言体现中西方文化,差异,语言体现'],
               ['中西方语言与文化的差异', '飞桨致力于让深度学习技术的创新与应用更简单']]

def predict(data, tokenizer):
    examples = []
    for idx, text in enumerate(data):
        input_ids, segment_ids = convert_example({idx: text[0]}, tokenizer)  # first text of the pair: the query
        title_ids, title_segment_ids = convert_example({idx: text[1]}, tokenizer)  # second text of the pair: the title
        examples.append(
            (input_ids, segment_ids, title_ids, title_segment_ids))
    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # query input_ids
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # query token_type_ids
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # title input_ids
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # title token_type_ids
    ): fn(samples)
    query_ids, query_segment_ids, title_ids, title_segment_ids = batchify_fn(
        examples)
    input_handles[0].copy_from_cpu(query_ids)
    input_handles[1].copy_from_cpu(query_segment_ids)
    predictor.run()
    query_logits = output_handle.copy_to_cpu()  # query semantic embeddings
    input_handles[0].copy_from_cpu(title_ids)
    input_handles[1].copy_from_cpu(title_segment_ids)
    predictor.run()
    title_logits = output_handle.copy_to_cpu()  # title semantic embeddings
    print(f'{query_logits.shape=},{title_logits.shape=}')
    result = [
        float(1 - spatial.distance.cosine(arr1, arr2))
        for arr1, arr2 in zip(query_logits, title_logits)
    ]
    return result

res = predict(corpus_list, tokenizer)
print(res)
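
Because both embeddings are L2-normalized, the cosine similarity computed with scipy is simply the dot product of the two vectors, and the related first pair should score noticeably higher than the unrelated second pair. The equivalent dot-product view, shown only to illustrate the relationship (it recomputes the embeddings with extract_embedding):

# Equivalent view: with unit-length vectors, cosine similarity is a plain dot product.
query_vecs = extract_embedding([{i: pair[0]} for i, pair in enumerate(corpus_list)], tokenizer)
title_vecs = extract_embedding([{i: pair[1]} for i, pair in enumerate(corpus_list)], tokenizer)
print(np.sum(query_vecs * title_vecs, axis=-1))  # should match `res` up to numerical noise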
