bert4keras task update: extracting triples from unstructured text, and how a pretrained model performs on the Baidu knowledge extraction dataset

A triple extraction task, based on the "half pointer, half tagging" design.

Article write-up: https://kexue.fm/archives/7161

Dataset: http://ai.baidu.com/broad/download?dataset=sked

Best F1 = 0.82198

Code source: the bert4keras examples.

Su Jianlin (the bert4keras author) encourages everyone to make small modifications and publish their own write-ups.


This is a Chinese triple extraction dataset; each line of the data files is one JSON record like the following:

{
    "text": "《新駌鸯蝴蝶梦》是黄安的音乐作品,收录在《流金十载全记录》专辑中",
    "spo_list": [
        {
            "subject": "新駌鸯蝴蝶梦",
            "predicate": "所属专辑",
            "object": "流金十载全记录",
            "subject_type": "歌曲",
            "object_type": "音乐专辑"
        },
        {
            "subject": "新駌鸯蝴蝶梦",
            "predicate": "歌手",
            "object": "黄安",
            "subject_type": "歌曲",
            "object_type": "人物"
        }
    ]
}
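
For orientation, each record is reduced to plain (subject, predicate, object) tuples before training. A minimal sketch of what the load_data function in the script below produces for one line (the file path follows the script's kg_huge/ layout):

import json

with open('kg_huge/train_data.json', encoding='utf-8') as f:
    record = json.loads(next(f))  # first record of the training file
triples = [(spo['subject'], spo['predicate'], spo['object'])
           for spo in record['spo_list']]
# For the sample record above, triples would be:
# [('新駌鸯蝴蝶梦', '所属专辑', '流金十载全记录'),
#  ('新駌鸯蝴蝶梦', '歌手', '黄安')]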

Install bert4keras:

pip install git+https://www.github.com/bojone/bert4keras.git
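Note: the script below targets the bert4keras API of early 2020 (bert4keras.bert.build_bert_model, bert4keras.tokenizer.Tokenizer). Later releases renamed these to bert4keras.models.build_transformer_model and bert4keras.tokenizers.Tokenizer, so if you install from current master you may need to adjust the imports or pin an older release.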

The training code is as follows:


import json
import codecs
import numpy as np
import tensorflow as tf
from bert4keras.backend import keras, set_gelu, K
from bert4keras.layers import LayerNormalization
from bert4keras.tokenizer import Tokenizer
from bert4keras.bert import build_bert_model
from bert4keras.optimizers import Adam, ExponentialMovingAverage
from bert4keras.snippets import sequence_padding, DataGenerator
from keras.layers import *
from keras.models import Model
from tqdm import tqdm


maxlen = 128
batch_size = 64
config_path = 'wwm/bert_config.json'
checkpoint_path = 'wwm/bert_model.ckpt'
dict_path = 'wwm/vocab.txt'


def load_data(filename):
    D = []
    with codecs.open(filename, encoding='utf-8') as f:
        for l in f:
            l = json.loads(l)
            D.append({
                'text': l['text'],
                'spo_list': [
                    (spo['subject'], spo['predicate'], spo['object'])
                    for spo in l['spo_list']
                ]
            })
    return D


# Load the datasets
train_data = load_data('kg_huge/train_data.json')
valid_data = load_data('kg_huge/dev_data.json')
predicate2id, id2predicate = {}, {}

with codecs.open('kg_huge/all_50_schemas') as f:
    for l in f:
        l = json.loads(l)
        if l['predicate'] not in predicate2id:
            id2predicate[len(predicate2id)] = l['predicate']
            predicate2id[l['predicate']] = len(predicate2id)

# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)


def search(pattern, sequence):
    """从sequence中寻找子串pattern
    如果找到,返回第一个下标;否则返回-1。
    """
    n = len(pattern)
    for i in range(len(sequence)):
        if sequence[i:i + n] == pattern:
            return i
    return -1
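
# Quick illustration of search() (an added note, not part of the original script):
#   search([3, 4], [1, 2, 3, 4, 5]) -> 2    first index where the sub-list starts
#   search([9], [1, 2, 3])          -> -1   not found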


class data_generator(DataGenerator):
    """数据生成器
    """
    def __iter__(self, random=False):
        idxs = list(range(len(self.data)))
        if random:
            np.random.shuffle(idxs)
        batch_token_ids, batch_segment_ids = [], []
        batch_subject_labels, batch_subject_ids, batch_object_labels = [], [], []
        for i in idxs:
            d = self.data[i]
            token_ids, segment_ids = tokenizer.encode(d['text'], max_length=maxlen)
            # Group the triples as {s: [(o, p)]}
            spoes = {}
            for s, p, o in d['spo_list']:
                s = tokenizer.encode(s)[0][1:-1]
                p = predicate2id[p]
                o = tokenizer.encode(o)[0][1:-1]
                s_idx = search(s, token_ids)
                o_idx = search(o, token_ids)
                if s_idx != -1 and o_idx != -1:
                    s = (s_idx, s_idx + len(s) - 1)
                    o = (o_idx, o_idx + len(o) - 1, p)
                    if s not in spoes:
                        spoes[s] = []
                    spoes[s].append(o)
            if spoes:
                # subject labels
                subject_labels = np.zeros((len(token_ids), 2))
                for s in spoes:
                    subject_labels[s[0], 0] = 1
                    subject_labels[s[1], 1] = 1
                # randomly sample one subject
                start, end = np.array(list(spoes.keys())).T
                start = np.random.choice(start)
                end = np.random.choice(end[end >= start])
                subject_ids = (start, end)
                # object labels for the sampled subject
                object_labels = np.zeros((len(token_ids), len(predicate2id), 2))
                for o in spoes.get(subject_ids, []):
                    object_labels[o[0], o[2], 0] = 1
                    object_labels[o[1], o[2], 1] = 1
                # assemble the batch
                batch_token_ids.append(token_ids)
                batch_segment_ids.append(segment_ids)
                batch_subject_labels.append(subject_labels)
                batch_subject_ids.append(subject_ids)
                batch_object_labels.append(object_labels)
                if len(batch_token_ids) == self.batch_size or i == idxs[-1]:
                    batch_token_ids = sequence_padding(batch_token_ids)
                    batch_segment_ids = sequence_padding(batch_segment_ids)
                    batch_subject_labels = sequence_padding(batch_subject_labels, padding=np.zeros(2))
                    batch_subject_ids = np.array(batch_subject_ids)
                    batch_object_labels = sequence_padding(batch_object_labels, padding=np.zeros((len(predicate2id), 2)))
                    yield [
                        batch_token_ids, batch_segment_ids,
                        batch_subject_labels, batch_subject_ids, batch_object_labels
                    ], None
                    batch_token_ids, batch_segment_ids = [], []
                    batch_subject_labels, batch_subject_ids, batch_object_labels = [], [], []
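
# Shapes of one yielded batch (an added note; b = batch size, L = padded
# sequence length, P = len(predicate2id)):
#   batch_token_ids:      (b, L)
#   batch_segment_ids:    (b, L)
#   batch_subject_labels: (b, L, 2)     start/end flags for all subjects
#   batch_subject_ids:    (b, 2)        (start, end) of the one sampled subject
#   batch_object_labels:  (b, L, P, 2)  start/end flags per predicate class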


def batch_gather(params, indices):
    """params.shape=[b, n, d],indices.shape=[b]
    从params的第i个序列中选出第indices[i]个向量,返回shape=[b, d]。
    """
    indices = K.cast(indices, 'int32')
    batch_idxs = K.arange(0, K.shape(indices)[0])
    indices = K.stack([batch_idxs, indices], 1)
    return tf.gather_nd(params, indices)
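
# Example (an added note): for params of shape (2, 3, d) and indices [0, 2],
# batch_gather returns [params[0, 0], params[1, 2]] with shape (2, d).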


def extract_subject(inputs):
    """Gather the subject's vector representation from output via subject_ids.
    """
    output, subject_ids = inputs
    start = batch_gather(output, subject_ids[:, 0])
    end = batch_gather(output, subject_ids[:, 1])
    subject = K.concatenate([start, end], 1)
    return subject


# Extra inputs (training labels)
subject_labels = Input(shape=(None, 2), name='Subject-Labels')
subject_ids = Input(shape=(2, ), name='Subject-Ids')
object_labels = Input(shape=(None, len(predicate2id), 2), name='Object-Labels')

# Load the pretrained model
bert = build_bert_model(
    config_path=config_path,
    checkpoint_path=checkpoint_path,
    return_keras_model=False,
)

# Predict subjects
output = Dense(units=2,
               activation='sigmoid',
               kernel_initializer=bert.initializer)(bert.model.output)
subject_preds = Lambda(lambda x: x**2)(output)

subject_model = Model(bert.model.inputs, subject_preds)

# Feed in the subject and predict objects;
# Conditional Layer Normalization injects the subject into the object prediction
output = bert.model.layers[-2].get_output_at(-1)
subject = Lambda(extract_subject)([output, subject_ids])
output = LayerNormalization(conditional=True)([output, subject])
output = Dense(units=len(predicate2id) * 2,
               activation='sigmoid',
               kernel_initializer=bert.initializer)(output)
output = Reshape((-1, len(predicate2id), 2))(output)
object_preds = Lambda(lambda x: x**4)(output)

object_model = Model(bert.model.inputs + [subject_ids], object_preds)

# Training model
train_model = Model(bert.model.inputs + [subject_labels, subject_ids, object_labels],
                    [subject_preds, object_preds])

mask = bert.model.get_layer('Sequence-Mask').output

subject_loss = K.binary_crossentropy(subject_labels, subject_preds)
subject_loss = K.mean(subject_loss, 2)
subject_loss = K.sum(subject_loss * mask) / K.sum(mask)

object_loss = K.binary_crossentropy(object_labels, object_preds)
object_loss = K.sum(K.mean(object_loss, 3), 2)
object_loss = K.sum(object_loss * mask) / K.sum(mask)

train_model.add_loss(subject_loss + object_loss)
train_model.compile(optimizer=Adam(1e-5))
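
# Added note: subject_preds squares the sigmoid outputs and object_preds
# raises them to the 4th power. Both transforms keep scores in [0, 1] while
# pushing small values closer to 0; since the vast majority of positions are
# negative examples, this suppresses spurious spans. The decoding thresholds
# in extract_spoes below (0.6 for starts, 0.5 for ends) are applied to these
# shrunken scores; see the linked kexue.fm article for the author's rationale.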


def extract_spoes(text):
    """抽取输入text所包含的三元组
    """
    tokens = tokenizer.tokenize(text, max_length=maxlen)
    token_ids, segment_ids = tokenizer.encode(text, max_length=maxlen)
    # extract subjects
    subject_preds = subject_model.predict([[token_ids], [segment_ids]])
    start = np.where(subject_preds[0, :, 0] > 0.6)[0]
    end = np.where(subject_preds[0, :, 1] > 0.5)[0]
    subjects = []
    for i in start:
        j = end[end >= i]
        if len(j) > 0:
            j = j[0]
            subjects.append((i, j))
    if subjects:
        spoes = []
        token_ids = np.repeat([token_ids], len(subjects), 0)
        segment_ids = np.repeat([segment_ids], len(subjects), 0)
        subjects = np.array(subjects)
        # feed in subjects, extract objects and predicates
        object_preds = object_model.predict([token_ids, segment_ids, subjects])
        for subject, object_pred in zip(subjects, object_preds):
            start = np.where(object_pred[:, :, 0] > 0.6)
            end = np.where(object_pred[:, :, 1] > 0.5)
            for _start, predicate1 in zip(*start):
                for _end, predicate2 in zip(*end):
                    if _start <= _end and predicate1 == predicate2:
                        spoes.append((subject, predicate1, (_start, _end)))
                        break
        return [
            (
                tokenizer.decode(token_ids[0, s[0]:s[1] + 1], tokens[s[0]:s[1] + 1]),
                id2predicate[p],
                tokenizer.decode(token_ids[0, o[0]:o[1] + 1], tokens[o[0]:o[1] + 1])
            ) for s, p, o in spoes
        ]
    else:
        return []
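
# Example (an added note; hypothetical output, assuming a trained model):
# >>> extract_spoes('《新駌鸯蝴蝶梦》是黄安的音乐作品,收录在《流金十载全记录》专辑中')
# [('新駌鸯蝴蝶梦', '所属专辑', '流金十载全记录'),
#  ('新駌鸯蝴蝶梦', '歌手', '黄安')]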


class SPO(tuple):
    """用来存三元组的类
    表现跟tuple基本一致,只是重写了 __hash__ 和 __eq__ 方法,
    使得在判断两个三元组是否等价时容错性更好。
    """
    def __init__(self, spo):
        self.spox = (
            tuple(tokenizer.tokenize(spo[0])),
            spo[1],
            tuple(tokenizer.tokenize(spo[2])),
        )

    def __hash__(self):
        return self.spox.__hash__()

    def __eq__(self, spo):
        return self.spox == spo.spox
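
# Added note: equality of SPO objects is judged on the re-tokenized strings,
# so two triples that differ only in ways the tokenizer normalises away
# (e.g. letter case, since do_lower_case=True) still compare as equal:
#   SPO(('ABC', '歌手', '黄安')) == SPO(('abc', '歌手', '黄安'))  # True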


def evaluate(data):
    """评估函数,计算f1、precision、recall
    """
    X, Y, Z = 1e-10, 1e-10, 1e-10
    f = codecs.open('dev_pred.json', 'w', encoding='utf-8')
    pbar = tqdm()
    for d in data:
        R = set([SPO(spo) for spo in extract_spoes(d['text'])])
        T = set([SPO(spo) for spo in d['spo_list']])
        X += len(R & T)
        Y += len(R)
        Z += len(T)
        f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
        pbar.update()
        pbar.set_description('f1: %.5f, precision: %.5f, recall: %.5f' %
                             (f1, precision, recall))
        s = json.dumps(
            {
                'text': d['text'],
                'spo_list': list(T),
                'spo_list_pred': list(R),
                'new': list(R - T),
                'lack': list(T - R),
            },
            ensure_ascii=False,
            indent=4)
        f.write(s + '\n')
    pbar.close()
    f.close()
    return f1, precision, recall
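
# Metric definitions used above (an added note; the 1e-10 initial values
# simply avoid division by zero):
#   X = |R ∩ T|  number of correctly predicted triples
#   Y = |R|      number of predicted triples
#   Z = |T|      number of gold triples
#   precision = X / Y,  recall = X / Z,  f1 = 2X / (Y + Z)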


class Evaluator(keras.callbacks.Callback):
    """评估和保存模型
    """
    def __init__(self):
        self.best_val_f1 = 0.

    def on_epoch_end(self, epoch, logs=None):
        EMAer.apply_ema_weights()
        f1, precision, recall = evaluate(valid_data)
        if f1 >= self.best_val_f1:
            self.best_val_f1 = f1
            train_model.save_weights('best_model.weights')
        EMAer.reset_old_weights()
        print('f1: %.5f, precision: %.5f, recall: %.5f, best f1: %.5f\n' %
              (f1, precision, recall, self.best_val_f1))


if __name__ == '__main__':

    train_generator = data_generator(train_data, batch_size)
    evaluator = Evaluator()
    EMAer = ExponentialMovingAverage(0.999)

    train_model.fit_generator(train_generator.forfit(),
                              steps_per_epoch=len(train_generator),
                              epochs=20,
                              callbacks=[evaluator, EMAer])

else:

    train_model.load_weights('best_model.weights')
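
For inference, note the else branch at the end of the script: when the file is imported rather than run, it loads best_model.weights. A minimal usage sketch (the file and module names here are hypothetical):

# predict.py (hypothetical file name). Importing the training script runs its
# else branch, which loads best_model.weights into train_model; after that
# extract_spoes can be called directly.
from train_script import extract_spoes  # `train_script` is a hypothetical module name

text = '《新駌鸯蝴蝶梦》是黄安的音乐作品,收录在《流金十载全记录》专辑中'
print(extract_spoes(text))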

Download for the Chinese wwm (whole word masking) BERT weights:

ymcui/Chinese-BERT-wwm (github.com)

[Screenshot: wwm training on a small subset of the data]

[Screenshot: first epoch on the full dataset]

Just one epoch already reaches a score of 79.5.

[Screenshot: full-dataset training progress]

By the 16th epoch the loss is down to 0.0122, and the best F1 score is 82.41.
