Converting Chinese text into BERT's inputs: input_ids, input_mask, segment_ids


Follow the WeChat official account "自然语言处理与算法" so you won't lose your way in this vast world~

Background: I recently worked on a BERT-based text classification project at a small company. After training, the model had to be exported as a SavedModel (see the companion post "tensorflow将BERT模型保存为PB(saved model)并部署") and deployed on Alibaba Cloud's machine learning platform. Before the client can send a prediction request, the text has to be converted into the form BERT expects, namely input_ids, input_mask, and segment_ids.
I couldn't find an existing tool for this anywhere online (if one exists, put it down to my inexperience), so I pulled this part out of the project and am sharing it here. If it helps, feel free to like the post and join the discussion.
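For context, once a sentence has been converted, the client request to the deployed SavedModel is just the three integer lists wrapped into the serving signature's inputs. Below is a minimal client-side sketch, assuming a TensorFlow-Serving-style REST endpoint and input tensor names input_ids / input_mask / segment_ids; the real URL, authorization header, and signature names depend on how the model was exported and on the PAI-EAS service configuration, so treat every name here as a placeholder.

import json
import requests  # any HTTP client works; requests is just convenient

# Placeholder endpoint and token -- replace with the actual PAI-EAS service URL and auth token.
SERVICE_URL = "http://example.com/api/predict/bert_text_classifier"
AUTH_TOKEN = "YOUR_TOKEN"

def predict_one(input_ids, input_mask, segment_ids):
    """Send one converted example to the deployed BERT SavedModel and return its response."""
    payload = {
        "signature_name": "serving_default",      # assumed default signature name
        "instances": [{
            "input_ids": input_ids,
            "input_mask": input_mask,
            "segment_ids": segment_ids,
        }],
    }
    resp = requests.post(SERVICE_URL,
                         data=json.dumps(payload),
                         headers={"Authorization": AUTH_TOKEN})
    return resp.json()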

Source code

#!/usr/bin/python
# -*- coding: UTF-8 -*-

# @Time: 2020/9/30 10:16 
# @Author: zhk
# @Description: Data preprocessing - convert the input text into BERT's input_ids, input_mask and segment_ids


import csv
import pandas as pd
from bert import tokenization, modeling
import tensorflow as tf
from sklearn.utils import shuffle


class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs a InputExample.
        Args:
          guid: Unique id for the example.
          text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
          text_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
          label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label

class PaddingInputExample(object):
    """Fake example so the num input examples is a multiple of the batch size.
    When running eval/predict on the TPU, we need to pad the number of examples
    to be a multiple of the batch size, because the TPU requires a fixed batch
    size. The alternative is to drop the last batch, which is bad because it means
    the entire output data won't be generated.
    We use this class instead of `None` because treating `None` as padding
    batches could cause silent errors.
    """


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self,
                 input_ids,
                 input_mask,
                 segment_ids,
                 label_id,
                 is_real_example=True):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id
        self.is_real_example = is_real_example


class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for prediction."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with tf.gfile.Open(input_file, "r") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                lines.append(line)
            return lines


class MyProcessor(DataProcessor):

    # Read the input file and return "label\ttext" lines
    def read_txt(self, filepath, type):
        df = pd.read_csv(filepath + '/' + type + '.csv', delimiter=",", names=['labels', 'text'], header=None, engine='python')
        df = shuffle(df)  # shuffle the rows
        lines = []
        for data in df.iloc[:].itertuples():
            content = str(data.labels) + "\t" + str(data.text)
            lines.append(content)
        return lines

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self.read_txt(data_dir, 'train'), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self.read_txt(data_dir, 'dev'), "dev")

    # def get_test_examples(self, data_dir):
    #     """See base class."""
    #     return self._create_examples(
    #         self.read_txt(os.path.join(data_dir, "test.txt"), "test"), "test")

    def get_labels(self):
        """See base class."""
        # Path of the file used to enumerate the label set; adjust to your data
        df = pd.read_csv('/home/data/dev.csv', delimiter=",", names=['labels', 'text'], header=None)
        labels_df = df[['labels']]
        labels_df = labels_df.drop_duplicates()

        labels = []
        for data in labels_df.iloc[:].itertuples():
            labels.append(data.labels)
        #labels = [0,1,2]
        return labels

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            # Note: the original BERT processors skip line 0 to drop a TSV header;
            # read_txt() above produces no header line, so every line is kept here.
            guid = "%s-%s" % (set_type, i)
            split_line = line.strip().split("\t")
            text_a = tokenization.convert_to_unicode(split_line[1])
            text_b = None
            if set_type == "test":
                label = "6efaa392"
            else:
                label = tokenization.convert_to_unicode(split_line[0])
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


def convert_single_example(ex_index, example, max_seq_length,
                           tokenizer):
    """Converts a single `InputExample` into a single `InputFeatures`."""

    if isinstance(example, PaddingInputExample):
        return InputFeatures(
            input_ids=[0] * max_seq_length,
            input_mask=[0] * max_seq_length,
            segment_ids=[0] * max_seq_length,
            label_id=0,
            is_real_example=False)

    # label_map = {}
    # for (i, label) in enumerate(label_list):
    #     label_map[label] = i

    # Save the label-to-index mapping to label2id.pkl; that file is exported to the trans_model_dir folder together with the model
    # output_label2id_file = os.path.join(FLAGS.trans_model_dir, "label2id.pkl")
    # if not os.path.exists(output_label2id_file):
    #     with open(output_label2id_file, 'wb') as w:
    #         pickle.dump(label_map, w)

    tokens_a = tokenizer.tokenize(example.text_a)
    tokens_b = None
    if example.text_b:
        tokens_b = tokenizer.tokenize(example.text_b)

    if tokens_b:
        # Modifies `tokens_a` and `tokens_b` in place so that the total
        # length is less than the specified length.
        # Account for [CLS], [SEP], [SEP] with "- 3"
        _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    else:
        # Account for [CLS] and [SEP] with "- 2"
        if len(tokens_a) > max_seq_length - 2:
            tokens_a = tokens_a[0:(max_seq_length - 2)]

    tokens = []
    segment_ids = []
    tokens.append("[CLS]")
    segment_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append("[SEP]")
    segment_ids.append(0)

    if tokens_b:
        for token in tokens_b:
            tokens.append(token)
            segment_ids.append(1)
        tokens.append("[SEP]")
        segment_ids.append(1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)

    # Zero-pad up to the sequence length.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    # The label is not needed at prediction time, so label_id is simply hard-coded to 0
    # label_id = label_map[example.label]
    label_id = 0
    if ex_index < 5:
        tf.logging.info("*** Example ***")
        tf.logging.info("guid: %s" % (example.guid))
        tf.logging.info("tokens: %s" % " ".join(
            [tokenization.printable_text(x) for x in tokens]))
        tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

    feature = InputFeatures(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
        label_id=label_id,
        is_real_example=True)
    return feature



def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""

    # This is a simple heuristic which will always truncate the longer sequence
    # one token at a time. This makes more sense than truncating an equal percent
    # of tokens from each, since if one sequence is very short then each token
    # that's truncated likely contains more information than a longer sequence.
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()



def main():
    tf.logging.set_verbosity(tf.logging.INFO)  # make the tf.logging.info sample output visible
    basedir = '/home/pretain/'
    bert_config_file = basedir + 'chinese_L-12_H-768_A-12/bert_config.json'
    vocab_file = basedir + 'chinese_L-12_H-768_A-12/vocab.txt'
    init_checkpoint = basedir + 'bert_model.ckpt'
    do_lower_case = True
    max_seq_length = 200

    processor = MyProcessor()
    # Read the split to convert; the 'test' file is used here just to verify the conversion works end to end
    lines = processor.read_txt('/home/data','test')
    examples = processor._create_examples(lines, 'test')

    tokenization.validate_case_matches_checkpoint(do_lower_case, init_checkpoint)

    bert_config = modeling.BertConfig.from_json_file(bert_config_file)

    tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)
    # Save the extracted features to res.txt
    f3 = open('./res.txt', 'w', encoding='utf-8')
    for (ex_index, example) in enumerate(examples):
        feature = convert_single_example(ex_index, example, max_seq_length, tokenizer)
        print(feature.input_ids)
        print(feature.input_mask)
        print(feature.segment_ids)
        print(feature.label_id)

        f3.write(str(feature.input_ids) + '\n')
        f3.write(str(feature.input_mask) + '\n')
        f3.write(str(feature.segment_ids) + '\n')
        f3.write(str(feature.label_id) + '\n')
        f3.write('\n')
    f3.close()


if __name__ == '__main__':
    main()

Data
(1) Input data: two columns, labels and text (the label and the original text, separated by an ASCII comma); adjust this to your own data as needed. Two example rows are shown below, followed by a short pandas sketch for producing a file in this layout.
Example:

0,大华技术:超高精度人体热成像测温系统经信发布2020-02-2921:51:17●浙江大华技术股份有限公司浙江大华技术股份有限公司超高精度人体热成像测温系统通过热成像+黑体非接触式对人员体温进行初筛,对异常体温进行实时预警,可有效提高人员通行率,保护现场防疫工作人员。大华超高精度人体热成像测温系统采
0,A股3月迎来艳阳天牛市布局正当时!这类股成主力新宠儿涨停战机2020-03-0217:29:14全局扫描今日两市资金热度明显提升,热点板块持续拉升,赚钱效应一片大好。今日三大股指全线反弹,沪指高开高走回补缺口,两市各个板块全线飘红,仅百余只个股下跌。盘面上,以水泥为代表的基建类板块集体走强,5G板块
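If you need to produce a file in exactly this layout, the following minimal pandas sketch writes one; the path /home/data/test.csv mirrors what main() reads above, and the rows are made-up placeholders.

import pandas as pd

# Two columns, label first and raw text second, no header row -- matching read_txt() above.
rows = [
    (0, "第一条示例新闻文本"),
    (1, "第二条示例新闻文本"),
]
pd.DataFrame(rows).to_csv('/home/data/test.csv', header=False, index=False)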

(2) Converted data: each sample becomes four lines; the first is input_ids, the second input_mask, the third segment_ids, and the fourth label_id (label_id is hard-coded to 0 and the label is not needed at prediction time, so that part of the code can be removed). A small sketch for loading this file back into Python follows the sample output.
Example:

[101, 7270, 3217, 1112, 3299, 7770, 3173, 2825, 3318, 772, 689, 2458, 1355, 1277, 1423, 3209, 928, 2622, 2825, 3318, 5500, 819, 3300, 7361, 1062, 1385, 1447, 2339, 3300, 2415, 6822, 6121, 4495, 772, 3198, 7313, 8038, 8439, 118, 8140, 118, 8146, 3341, 3975, 8038, 1395, 3360, 3189, 2845, 1068, 7241, 6404, 8038, 4554, 2658, 7344, 2971, 1908, 2339, 1908, 772, 150, 8178, 8253, 1920, 1395, 5381, 118, 1395, 3360, 3189, 2845, 2135, 3175, 5381, 4991, 150, 8178, 8253, 1920, 1395, 5381, 118, 1395, 3360, 3189, 2845, 2135, 3175, 5381, 4991, 150, 8178, 8253, 1920, 1395, 5381, 118, 1395, 3360, 3189, 2845, 2135, 3175, 5381, 4991, 1908, 2339, 1908, 772, 809, 3341, 8024, 7270, 3217, 1112, 3299, 7770, 3173, 2825, 3318, 772, 689, 2458, 1355, 1277, 1423, 3209, 928, 2622, 2825, 3318, 5500, 819, 3300, 7361, 1062, 1385, 1780, 2898, 100, 1912, 7344, 6783, 1057, 8024, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
0

[101, 3173, 3857, 4906, 2825, 135, 4003, 1220, 3173, 7319, 135, 3633, 3152, 3173, 7319, 1745, 4275, 1300, 2145, 6228, 7574, 3173, 7319, 3173, 7319, 1745, 4275, 1300, 2145, 6228, 7574, 676, 1146, 7164, 4692, 6568, 2845, 8078, 2213, 1501, 2125, 6981, 5852, 3119, 1872, 6862, 7674, 3613, 7360, 5635, 702, 855, 3144, 8024, 2342, 7030, 7361, 1545, 5500, 6237, 4881, 3341, 6159, 676, 1146, 7164, 4692, 6568, 2845, 8078, 2213, 1501, 2125, 6981, 5852, 3119, 1872, 6862, 7674, 3613, 7360, 5635, 702, 855, 3144, 8024, 2342, 7030, 7361, 1545, 5500, 6237, 4881, 3341, 6159, 8439, 2399, 8140, 3299, 8146, 3189, 8128, 131, 8155, 4518, 7481, 3173, 7319, 3173, 3857, 6568, 5307, 8172, 5367, 2207, 2099, 860, 3123, 1920, 2099, 860, 3119, 5966, 2544, 1300, 2544, 928, 1146, 775, 5596, 6380, 8186, 10624, 4958, 7313, 1333, 3403, 7579, 8038, 676, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
0
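If the features written to res.txt need to be loaded back later (for example to batch them into a prediction request), the four-lines-per-sample layout above can be parsed with a few lines of Python. A minimal sketch, assuming the file was written exactly as by the script above (Python list literals, one blank line between samples):

import ast

def load_features(path='./res.txt'):
    """Parse res.txt back into (input_ids, input_mask, segment_ids, label_id) tuples."""
    with open(path, encoding='utf-8') as f:
        lines = [line.strip() for line in f if line.strip()]
    samples = []
    # Every four consecutive non-empty lines form one sample.
    for i in range(0, len(lines), 4):
        samples.append((
            ast.literal_eval(lines[i]),      # input_ids
            ast.literal_eval(lines[i + 1]),  # input_mask
            ast.literal_eval(lines[i + 2]),  # segment_ids
            int(lines[i + 3]),               # label_id
        ))
    return samples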

If this was helpful, a like, favorite, or comment is much appreciated. Thanks!
