A summary of the three ways to build a model

Method 1: Sequential construction:

import tensorflow as tf
from tensorflow import keras

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])
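
The Sequential model can then be compiled and trained as usual. A minimal sketch (the optimizer, loss, and dummy MNIST-shaped data here are only illustrative):

import numpy as np

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Dummy batch shaped like MNIST images, just to exercise the model.
x_dummy = np.random.rand(32, 28, 28).astype('float32')
y_dummy = np.random.randint(0, 10, size=(32,))
model.fit(x_dummy, y_dummy, epochs=1, batch_size=8)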

Method 2: Based on tf.keras.Model (the functional API)

import tensorflow as tf
inputs = tf.keras.layers.Input(shape=(8,))  # Input requires a shape; (8,) is just an example
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
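
A quick sanity check of the functional model (the input shape (8,) above is only a placeholder, so the dummy batch here matches it):

import numpy as np

dummy = np.random.rand(2, 8).astype('float32')
print(model(dummy).shape)  # (2, 5)
model.summary()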

Method 3: Custom subclassing of tf.keras.Model; this gives the most flexibility and lets you define your own model methods

import tensorflow as tf


class ConvMaxPooling1d(tf.keras.layers.Layer):
    def __init__(self, filters, kernel):
        super(ConvMaxPooling1d, self).__init__()
        self.kernel_size = kernel
        # Conv1D: (batch_size, step, embedding_size) -> (batch_size, step - kernel_size + 1, filters)
        self.conv = tf.keras.layers.Conv1D(filters=filters, kernel_size=kernel, activation='relu')
        # GlobalMaxPool1D: (batch_size, step - kernel_size + 1, filters) -> (batch_size, filters)
        self.pool = tf.keras.layers.GlobalMaxPool1D()

    def call(self, inputs, masks=None):
        conv_out = self.conv(inputs)
        # Optional masking (disabled): shift the mask so it aligns with the conv output.
        # if masks is not None:
        #     masks_exp = tf.expand_dims(masks, axis=-1)
        #     conv_out += masks_exp[:, self.kernel_size - 1:]
        pool_out = self.pool(conv_out)
        return pool_out


class TextCNN(tf.keras.models.Model):
    def __init__(self, vocab, embedding_size, hidden_size, filters_list=[50, 60, 70, 80], kernels=[2, 3, 4, 5],
                 dropout=0.5, sentence_length=20):
        super(TextCNN, self).__init__()
        ind = tf.feature_column.categorical_column_with_vocabulary_file("sentence_vocab", vocabulary_file=vocab,
                                                                        default_value=0)
        self.embedding_size = embedding_size
        self.sentence_length = sentence_length
        self.dense_feature_layer = tf.keras.layers.DenseFeatures(
            [tf.feature_column.embedding_column(ind, dimension=embedding_size)])

        self.conv_maxs = [ConvMaxPooling1d(f, k) for f, k in zip(filters_list, kernels)]
        self.dropout = tf.keras.layers.Dropout(dropout)
        self.dense = tf.keras.layers.Dense(hidden_size, activation='relu')
        self.classifier = tf.keras.layers.Dense(1, activation='sigmoid')

    # @tf.function(input_signature=(tf.TensorSpec(shape=(None, None), dtype=tf.dtypes.string),))
    def call(self, inputs):
        # ***************word token embedding begin***************
        inputs = tf.convert_to_tensor(inputs)
        inputs_tensor = tf.reshape(inputs, (-1, 1))
        embed_word_vectors1 = self.dense_feature_layer({"sentence_vocab": inputs_tensor})
        embeddings = tf.reshape(embed_word_vectors1, (-1, self.sentence_length, self.embedding_size))
        # ***************word token embedding end***************
        # Each ConvMaxPooling1d layer: (batch_size, step, embedding_size) -> (batch_size, filters)
        conv_outs = [layer(embeddings, None) for layer in self.conv_maxs]
        # Concatenate the pooled outputs: list of (batch_size, filters_i) -> (batch_size, sum(filters))
        concat_out = tf.concat(conv_outs, axis=-1)
        dense_out = self.dense(concat_out)
        drop_out = self.dropout(dense_out)
        logits = self.classifier(drop_out)

        return logits
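
A usage sketch for the subclassed TextCNN (the vocabulary file name, tokens, and hyperparameters below are made up for illustration; in practice vocab should point to a real one-token-per-line vocabulary file, and this assumes a TF 2.x version where tf.feature_column is still available):

import tensorflow as tf

# Write a tiny one-token-per-line vocabulary file so the feature column can load it.
with open("vocab.txt", "w") as f:
    f.write("\n".join(["good", "bad", "movie", "great", "boring"]))

model = TextCNN(vocab="vocab.txt", embedding_size=16, hidden_size=32, sentence_length=20)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Dummy batch: 2 sentences, each padded/truncated to sentence_length tokens.
dummy = tf.constant([["good"] * 20, ["bad"] * 20])
print(model(dummy).shape)  # (2, 1)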


