Sequence Labeling with a CNN (TensorFlow)

This post shows how to build a simple CNN in TensorFlow for sequence labeling. The model runs the sequence through a convolutional layer, a fully connected layer, and a softmax, is trained with the Adam optimizer, and the training progress is visualized live with matplotlib.

1. Building a simple CNN for sequence labeling

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

TIME_STEPS = 15  # number of time steps in the input sequence
BATCH_SIZE = 1   # 50
INPUT_SIZE = 1   # size of each x input element
LR = 0.05        # learning rate
num_tags = 2     # number of output tags

# Define a get_batch function that generates the (fixed) training data.
# The tag is 1 wherever the input value is greater than 3, and 0 otherwise.
def get_batch():
    xs = np.array([[[[2], [3], [4], [5], [5], [5], [1], [5], [3], [2], [5], [5], [5], [3], [5]]]])
    res = np.array([[0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1]])
    # one-hot encode the tags for the softmax cross-entropy loss
    ys = np.zeros([1, TIME_STEPS, 2])
    for i in range(TIME_STEPS):
        if res[0, i] == 0:
            ys[0, i, 0] = 1
            ys[0, i, 1] = 0
        else:
            ys[0, i, 0] = 0
            ys[0, i, 1] = 1
    return [xs, res, ys]
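
As a quick sanity check (not part of the original post), the shapes returned here must line up with the placeholders defined in the model below:

    xs, res, ys = get_batch()
    print(xs.shape)   # (1, 1, 15, 1) -> [batch, in_height, in_width, in_channels]
    print(res.shape)  # (1, 15)       -> raw tag sequence, used only for plotting
    print(ys.shape)   # (1, 15, 2)    -> one-hot tags fed to the loss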

# Define the CNN model
class CNN(object):

    def __init__(self, n_steps, input_size, num_tags, batch_size):
        self.n_steps = n_steps
        self.input_size = input_size
        self.num_tags = num_tags
        self.batch_size = batch_size
        # Input to the conv net: [batch, in_height, in_width, in_channels];
        # for sequence (NLP-style) data the height is 1.
        self.xs = tf.placeholder(tf.float32, [self.batch_size, 1, self.n_steps, self.input_size], name='xs')
        # For sequence labeling the second dimension matches n_steps: every
        # time step of the input gets its own output. (float32 rather than
        # int32, since softmax_cross_entropy_with_logits expects float labels.)
        self.ys = tf.placeholder(tf.float32, [self.batch_size, self.n_steps, self.num_tags], name='ys')
        self.featureNum = 10  # extract 10 feature maps
        # [filter height, filter width, input channels, number of filters]
        W_conv1 = self.weight_variable([1, 3, 1, self.featureNum])
        # one bias per filter
        b_conv1 = self.bias_variable([self.featureNum])
        # convolution; SAME padding keeps the output width at n_steps
        layer_conv1 = tf.nn.conv2d(self.xs, W_conv1, strides=[1, 1, 1, 1], padding="SAME") + b_conv1
        # activation
        layer_conv1 = tf.nn.relu(layer_conv1)
        # Max pooling is omitted here to keep the later shape handling simple:
        # layer_pool1 = tf.nn.max_pool(layer_conv1, [1, 1, 3, 1], [1, 1, 1, 1], padding='VALID')
        layer_pool1 = layer_conv1
        # Fully connected layer mapping to n_steps x num_tags
        layer_pool1 = tf.reshape(layer_pool1, [self.n_steps, self.featureNum])
        W_fc1 = self.weight_variable([self.featureNum, self.num_tags])
        b_fc1 = self.bias_variable([self.num_tags])
        h_fc1 = tf.matmul(layer_pool1, W_fc1) + b_fc1  # raw per-step tag scores (logits)
        # softmax-normalized tag probabilities, used only for prediction
        self.y_conv = tf.nn.softmax(h_fc1)
        self.label = tf.reshape(self.ys, [self.n_steps, self.num_tags])
        # softmax_cross_entropy_with_logits applies the softmax internally,
        # so it must receive the raw logits h_fc1, not self.y_conv
        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.label, logits=h_fc1))
        # Adam optimizer
        self.train_op = tf.train.AdamOptimizer(LR).minimize(self.cost)
        self.pred = tf.argmax(self.y_conv, axis=1)

    def weight_variable(self, shape):
        initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(self, shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)
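
The class above targets TensorFlow 1.x (placeholders and sessions). For orientation, roughly the same architecture can be sketched in TensorFlow 2.x with Keras layers; this is an illustrative equivalent, not the original code:

    import tensorflow as tf

    # Conv1D over the 15-step sequence replaces the height-1 Conv2D above;
    # the loss applies the softmax internally via from_logits=True.
    model = tf.keras.Sequential([
        tf.keras.layers.Conv1D(10, 3, padding='same', activation='relu',
                               input_shape=(15, 1)),  # (time_steps, input_size)
        tf.keras.layers.Dense(2),                     # per-step tag logits
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(0.05),
                  loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True))
    # e.g. model.fit(xs.reshape(1, 15, 1), ys, epochs=150, verbose=0)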

# Train the CNN
if __name__ == '__main__':
    # build the CNN model
    model = CNN(TIME_STEPS, INPUT_SIZE, num_tags, BATCH_SIZE)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # matplotlib visualization
    plt.ion()  # interactive mode, so the plot updates continuously
    plt.show()
    # training loop
    for i in range(150):
        xs, res, ys = get_batch()  # fetch the batch data
        # feed the data
        feed_dict = {
            model.xs: xs,
            model.ys: ys,
        }
        # one training step
        _, cost, pred = sess.run(
            [model.train_op, model.cost, model.pred],
            feed_dict=feed_dict)
        # plotting: true tags in red, predicted tags as a blue dashed line
        r = res.reshape(-1, 1)
        p = pred.reshape(-1, 1)
        x = range(len(r))
        plt.clf()
        plt.plot(x, r, 'r', x, p, 'b--')
        plt.ylim((-1.2, 1.2))
        plt.draw()
        plt.pause(0.3)  # refresh every 0.3 s
        # print the cost
        if i % 20 == 0:
            print('cost: ', round(cost, 4))
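
After the loop finishes, it is easy to verify how well the network tags the single memorized sequence; this small check is a hypothetical addition, not in the original script:

    # per-step accuracy of the final predictions (hypothetical addition)
    accuracy = np.mean(pred == res.flatten())
    print('per-step accuracy:', accuracy)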

Result: (figure omitted: the live plot of true tags, red solid line, against predicted tags, blue dashed line)

2. Key CNN concepts

To be organized.
