TextCNN TensorFlow Implementation

1. Import libraries

Note: this code targets TensorFlow 1.x; the `tensorflow.contrib` module used below (for `slim`) no longer exists in TensorFlow 2.
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
2. Define the TextCNN class
class TextCNN(object):
    def __init__(self): ...
    def model(self): ...
    def fit(self): ...
    def batch_iter(self): ...
    def predict(self): ...

The class contains five methods: the initializer, the model-construction method, the training method, the batch-generation method, and the prediction method.

3. Implement the initializer
    def __init__(self,
            num_classes,
            seq_length,
            vocab_size,
            embedding_size,
            learning_rate,
            learning_decay_rate,
            learning_decay_steps,
            epoch,
            filter_sizes,
            num_filters,
            dropout_keep_prob,
            l2_lambda
            ):
        self.num_classes = num_classes
        self.seq_length = seq_length
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.learning_rate = learning_rate
        self.learning_decay_rate = learning_decay_rate
        self.learning_decay_steps = learning_decay_steps
        self.epoch = epoch
        self.filter_sizes = filter_sizes
        self.num_filters = num_filters
        self.dropout_keep_prob = dropout_keep_prob
        self.l2_lambda = l2_lambda
        self.inputs = tf.placeholder(tf.int32, [None, self.seq_length], name='inputs')
        self.targets = tf.placeholder(tf.float32, [None, self.num_classes], name='targets')
        self.l2_loss = tf.constant(0.0)
        self.model()

The initializer stores the required hyperparameters, defines the `inputs` and `targets` placeholders, and builds the graph by calling `self.model()`.
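
For concreteness, a hypothetical instantiation might look like this; every hyperparameter value below is an illustrative assumption, not a value prescribed by the original code:

```python
# All values below are illustrative assumptions, not tuned settings.
model = TextCNN(
    num_classes=10,             # number of target categories
    seq_length=600,             # length sequences are padded/truncated to
    vocab_size=5000,            # vocabulary size
    embedding_size=128,         # word-vector dimension
    learning_rate=1e-3,         # initial Adam learning rate
    learning_decay_rate=0.9,    # exponential-decay multiplier
    learning_decay_steps=1000,  # steps between decay applications
    epoch=10,                   # passes over the training data
    filter_sizes=[3, 4, 5],     # one conv branch per window size
    num_filters=128,            # feature maps per window size
    dropout_keep_prob=0.5,      # keep probability for dropout
    l2_lambda=0.01              # L2 regularization weight
)
```

Because `__init__` ends with `self.model()`, constructing the object immediately builds the full computation graph.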

4. Implement the model-construction method

What is implemented here is TextCNN-rand: the word vectors are initialized randomly and then updated during backpropagation. With a small change, pretrained word vectors can be used as the embedding parameters instead, with the option of fine-tuning them during backpropagation or keeping them fixed (a sketch of that variant follows the model code below).

    def model(self):
        # embedding layer: map word ids to vectors, then add a channel dimension for conv2d
        with tf.name_scope("embedding"):
            self.embedding = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),
                                        name="embedding")
            self.embedding_inputs = tf.nn.embedding_lookup(self.embedding, self.inputs)
            self.embedding_inputs = tf.expand_dims(self.embedding_inputs, -1)

        # convolution + max-pooling: one branch per filter size
        pooled_outputs = []
        for i, filter_size in enumerate(self.filter_sizes):
            with tf.name_scope("conv_{0}".format(filter_size)):
                filter_shape = [filter_size, self.embedding_size, 1, self.num_filters]  # [height, width, in_channels, out_channels]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[self.num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedding_inputs,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv"
                )
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # max over all (seq_length - filter_size + 1) positions -> [batch, 1, 1, num_filters]
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, self.seq_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool"
                )
                pooled_outputs.append(pooled)

        # concatenate the pooled features from every filter size into one vector
        num_filters_total = self.num_filters * len(self.filter_sizes)
        h_pool = tf.concat(pooled_outputs, 3)
        h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])

        # apply dropout to the resulting sentence vector
        with tf.name_scope("dropout"):
            h_drop = tf.nn.dropout(h_pool_flat, self.dropout_keep_prob)

        # fully connected output layer
        with tf.name_scope("output"):
            W = tf.get_variable("W", shape=[num_filters_total, self.num_classes],
                                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[self.num_classes]), name="b")
            self.l2_loss += tf.nn.l2_loss(W)
            self.l2_loss += tf.nn.l2_loss(b)
            self.logits = tf.nn.xw_plus_b(h_drop, W, b, name="scores")
            self.pred = tf.argmax(self.logits, 1, name="predictions")

        # loss: softmax cross-entropy plus L2 regularization
        with tf.name_scope('loss'):
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.targets)) + self.l2_lambda * self.l2_loss

        # optimizer with exponential learning-rate decay
        with tf.name_scope('optimizer'):
            self.global_step = tf.train.get_or_create_global_step()
            learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step,
                                                       self.learning_decay_steps, self.learning_decay_rate,
                                                       staircase=True)

            optimizer = tf.train.AdamOptimizer(learning_rate)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.optim = slim.learning.create_train_op(total_loss=self.loss, optimizer=optimizer, update_ops=update_ops)

        # accuracy
        with tf.name_scope('accuracy'):
            correct_predictions = tf.equal(self.pred, tf.argmax(self.targets, 1))
            self.acc = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
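
As noted at the start of this step, switching from TextCNN-rand to a pretrained-embedding variant only changes the embedding block. A minimal sketch, assuming `pretrained_vectors` is a user-supplied NumPy array of shape [vocab_size, embedding_size]:

```python
# Sketch of the pretrained-embedding variant; `pretrained_vectors` is an
# assumed, user-supplied array of shape [vocab_size, embedding_size].
with tf.name_scope("embedding"):
    self.embedding = tf.Variable(
        pretrained_vectors.astype(np.float32),
        trainable=True,   # True: fine-tune during BP; False: keep vectors fixed
        name="embedding")
    self.embedding_inputs = tf.nn.embedding_lookup(self.embedding, self.inputs)
    self.embedding_inputs = tf.expand_dims(self.embedding_inputs, -1)
```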

5. Implement the training method
    def fit(self, train_x, train_y, val_x, val_y, batch_size):
        # create directories for checkpoints and TensorBoard logs
        if not os.path.exists('./saves/textcnn'): os.makedirs('./saves/textcnn')
        if not os.path.exists('./train_logs/textcnn'): os.makedirs('./train_logs/textcnn')

        # start training
        train_steps = 0
        best_val_acc = 0
        # TensorBoard summaries (evaluated on the validation feed below)
        tf.summary.scalar('val_loss', self.loss)
        tf.summary.scalar('val_acc', self.acc)
        merged = tf.summary.merge_all()

        # create the session, summary writer, and saver, then initialize variables
        sess = tf.Session()
        writer = tf.summary.FileWriter('./train_logs/textcnn', sess.graph)
        saver = tf.train.Saver(max_to_keep=10)
        sess.run(tf.global_variables_initializer())

        for i in range(self.epoch):
            batch_train = self.batch_iter(train_x, train_y, batch_size)
            for batch_x, batch_y in batch_train:
                train_steps += 1
                feed_dict = {self.inputs: batch_x, self.targets: batch_y}
                _, train_loss, train_acc = sess.run([self.optim, self.loss, self.acc], feed_dict=feed_dict)

                if train_steps % 1000 == 0:
                    feed_dict = {self.inputs: val_x, self.targets: val_y}
                    val_loss, val_acc = sess.run([self.loss, self.acc], feed_dict=feed_dict)

                    summary = sess.run(merged, feed_dict=feed_dict)
                    writer.add_summary(summary, global_step=train_steps)

                    if val_acc >= best_val_acc:
                        best_val_acc = val_acc
                        saver.save(sess, "./saves/textcnn/", global_step=train_steps)

                    msg = 'epoch:%d/%d,train_steps:%d,train_loss:%.4f,train_acc:%.4f,val_loss:%.4f,val_acc:%.4f'
                    print(msg % (i + 1, self.epoch, train_steps, train_loss, train_acc, val_loss, val_acc))

        sess.close()
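
Note that `targets` was declared as a float placeholder of shape [None, num_classes], so the labels passed into `fit` must already be one-hot encoded. A minimal sketch of that conversion (the variable names are illustrative):

```python
# Convert integer class ids to one-hot float rows, e.g. 2 -> [0., 0., 1., ...].
def to_one_hot(labels, num_classes):
    return np.eye(num_classes, dtype=np.float32)[labels]

train_y = to_one_hot(raw_train_labels, num_classes=10)  # raw_train_labels: assumed int array of class ids
```
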
6. Implement the batch-generation method
    def batch_iter(self, x, y, batch_size=32, shuffle=True):
        """
        生成batch数据
        :param x: 训练集特征变量
        :param y: 训练集标签
        :param batch_size: 每个batch的大小
        :param shuffle: 是否在每个epoch时打乱数据
        :return:
        """
        data_len = len(x)
        num_batch = int((data_len - 1) / batch_size) + 1

        if shuffle:
            shuffle_indices = np.random.permutation(np.arange(data_len))
            x_shuffle = x[shuffle_indices]
            y_shuffle = y[shuffle_indices]
        else:
            x_shuffle = x
            y_shuffle = y
        for i in range(num_batch):
            start_index = i * batch_size
            end_index = min((i + 1) * batch_size, data_len)
            yield (x_shuffle[start_index:end_index], y_shuffle[start_index:end_index])
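
Since `batch_iter` is a generator, one pass over it yields one epoch of batches, with the last batch possibly smaller. An illustrative check (reusing the hypothetical `model` instance from step 3):

```python
# 100 samples with batch_size=32 should yield batches of 32, 32, 32 and 4.
x = np.arange(100).reshape(100, 1)
y = np.arange(100).reshape(100, 1)
for batch_x, batch_y in model.batch_iter(x, y, batch_size=32, shuffle=False):
    print(batch_x.shape, batch_y.shape)
```
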
7. Implement the prediction method
    def predict(self, x):
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state('./saves/textcnn/')
        saver.restore(sess, ckpt.model_checkpoint_path)

        feed_dict = {self.inputs: x}
        logits = sess.run(self.logits, feed_dict=feed_dict)
        y_pred = np.argmax(logits, 1)
        return y_pred
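
Putting the pieces together, a hypothetical end-to-end run might look like this, assuming `train_x`, `val_x`, and `test_x` are int arrays of padded word ids with shape [n, seq_length], and `train_y`/`val_y` are one-hot label arrays:

```python
# End-to-end sketch; all data arrays and hyperparameter values are assumptions.
model = TextCNN(num_classes=10, seq_length=600, vocab_size=5000,
                embedding_size=128, learning_rate=1e-3,
                learning_decay_rate=0.9, learning_decay_steps=1000,
                epoch=10, filter_sizes=[3, 4, 5], num_filters=128,
                dropout_keep_prob=0.5, l2_lambda=0.01)
model.fit(train_x, train_y, val_x, val_y, batch_size=64)
y_pred = model.predict(test_x)  # integer class ids, shape [len(test_x)]
```
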
8. Appendix: a PyTorch implementation

Below is TextCNN model code implemented with the PyTorch framework, for text classification:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TextCNN(nn.Module):
    def __init__(self, vocab_size, embedding_dim, num_classes, num_filters, filter_sizes, dropout_prob):
        super(TextCNN, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.convs = nn.ModuleList([
            nn.Conv2d(in_channels=1, out_channels=num_filters, kernel_size=(fs, embedding_dim))
            for fs in filter_sizes
        ])
        self.dropout = nn.Dropout(dropout_prob)
        self.fc = nn.Linear(num_filters * len(filter_sizes), num_classes)

    def forward(self, x):
        x = self.embedding(x)  # (batch_size, seq_len, embedding_dim)
        x = x.unsqueeze(1)  # (batch_size, 1, seq_len, embedding_dim)
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs]  # [(batch_size, num_filters, seq_len - filter_size + 1), ...]
        x = [F.max_pool1d(conv, conv.size(2)).squeeze(2) for conv in x]  # [(batch_size, num_filters), ...]
        x = torch.cat(x, 1)  # (batch_size, num_filters * len(filter_sizes))
        x = self.dropout(x)
        logits = self.fc(x)
        return logits
```

Here, `vocab_size` is the vocabulary size, `embedding_dim` the word-vector dimension, `num_classes` the number of classes, `num_filters` the number of convolution kernels per size, `filter_sizes` the list of kernel sizes, and `dropout_prob` the dropout probability. In the `forward` function, the `embedding` layer first converts the input word ids into word vectors, convolution and pooling are then applied, and the fully connected layer finally outputs the classification result.
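
A quick smoke test of this PyTorch version might look like the following; the batch size, sequence length, and hyperparameter values are illustrative:

```python
# Illustrative forward pass: 8 random sequences of length 50.
model = TextCNN(vocab_size=5000, embedding_dim=128, num_classes=10,
                num_filters=100, filter_sizes=[3, 4, 5], dropout_prob=0.5)
x = torch.randint(0, 5000, (8, 50))  # random word ids
logits = model(x)
print(logits.shape)  # torch.Size([8, 10])
```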