Implementing a Language Model with a Recurrent Neural Network (from 《TensorFlow:实战Google深度学习框架》)

# -*- coding: utf-8 -*-
import numpy as np
from mytensorflowmodel import reader  # the PTB reader module (see the note at the end)
import tensorflow as tf

DATA_PATH = "E:/learning/simple-examples/data"  # path to the unpacked PTB dataset
HIDDEN_SIZE = 200                               # hidden units per LSTM layer
NUM_LAYERS = 2                                  # number of stacked LSTM layers
VOCAB_SIZE = 10000                              # PTB vocabulary size

LEARNING_RATE = 1.0                             # learning rate for gradient descent
TRAIN_BATCH_SIZE = 20                           # batch size during training
TRAIN_NUM_STEP = 35                             # truncation length during training

EVAL_BATCH_SIZE = 1                             # batch size during evaluation
EVAL_NUM_STEP = 1                               # truncation length during evaluation
NUM_EPOCH = 2                                   # passes over the training data
KEEP_PROB = 0.5                                 # dropout keep probability
MAX_GRAD_NORM = 5                               # gradient clipping threshold

class PTBModel(object):
    def __init__(self, is_training, batch_size, num_steps):
        # batch size and truncation length (number of unrolled time steps)
        self.batch_size = batch_size
        self.num_steps = num_steps

        # input word ids and target word ids, both of shape [batch_size, num_steps]
        self.input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
        self.targets = tf.placeholder(tf.int32, [batch_size, num_steps])

        # Build one LSTM cell per layer; sharing one cell object across layers
        # ([lstm_cell] * NUM_LAYERS) breaks on TensorFlow releases after 1.0.
        def make_cell():
            cell = tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)
            if is_training:
                cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=KEEP_PROB)
            return cell
        cell = tf.nn.rnn_cell.MultiRNNCell([make_cell() for _ in range(NUM_LAYERS)])

        self.initial_state = cell.zero_state(batch_size, tf.float32)

        # map word ids to HIDDEN_SIZE-dimensional embedding vectors
        embedding = tf.get_variable("embedding", [VOCAB_SIZE, HIDDEN_SIZE])
        inputs = tf.nn.embedding_lookup(embedding, self.input_data)
        if is_training:
            inputs = tf.nn.dropout(inputs, KEEP_PROB)
        # unroll the LSTM for num_steps time steps, reusing weights across steps
        outputs = []
        state = self.initial_state
        with tf.variable_scope("RNN"):
            for time_step in range(num_steps):
                if time_step > 0:
                    tf.get_variable_scope().reuse_variables()
                cell_output, state = cell(inputs[:, time_step, :], state)
                outputs.append(cell_output)

        # stack the per-step outputs into [batch_size * num_steps, HIDDEN_SIZE]
        output = tf.reshape(tf.concat(outputs, 1), [-1, HIDDEN_SIZE])

        # project the LSTM outputs onto the vocabulary to get per-word logits
        weight = tf.get_variable("weight", [HIDDEN_SIZE, VOCAB_SIZE])
        bias = tf.get_variable("bias", [VOCAB_SIZE])
        logits = tf.matmul(output, weight) + bias

        # cross-entropy loss, with every position weighted equally
        loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [logits],
            [tf.reshape(self.targets, [-1])],
            [tf.ones([batch_size * num_steps], dtype=tf.float32)])
        self.cost = tf.reduce_sum(loss) / batch_size
        self.final_state = state

        # only the training graph needs gradients, clipping and a train op
        if not is_training:
            return
        trainable_variables = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(
            tf.gradients(self.cost, trainable_variables), MAX_GRAD_NORM)

        optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
        self.train_op = optimizer.apply_gradients(zip(grads, trainable_variables))
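
# Run one pass over `data`. The final LSTM state of each batch is fed back as
# the initial state of the next batch, so context is carried across the whole
# epoch (truncated backpropagation through time).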
def run_epoch(session, model, data, train_op, output_log):
    total_costs = 0.0
    iters = 0
    state = session.run(model.initial_state)
    for step, (x, y) in enumerate(reader.ptb_iterator(data, model.batch_size, model.num_steps)):
        cost, state, _ = session.run(
            [model.cost, model.final_state, train_op],
            {model.input_data: x, model.targets: y, model.initial_state: state})
        total_costs += cost
        iters += model.num_steps
        if output_log and step % 100 == 0:
            print("After %d steps, perplexity is %.3f" % (step, np.exp(total_costs / iters)))
    # perplexity is the exponential of the average per-word cross-entropy
    return np.exp(total_costs / iters)

def main(_):
    train_data, valid_data, test_data, _ = reader.ptb_raw_data(DATA_PATH)

    initializer = tf.random_uniform_initializer(-0.05, 0.05)
    # the evaluation model shares its variables with the training model
    with tf.variable_scope("language_model", reuse=None, initializer=initializer):
        train_model = PTBModel(True, TRAIN_BATCH_SIZE, TRAIN_NUM_STEP)
    with tf.variable_scope("language_model", reuse=True, initializer=initializer):
        eval_model = PTBModel(False, EVAL_BATCH_SIZE, EVAL_NUM_STEP)

    with tf.Session() as session:
        # tf.initialize_all_variables() is deprecated after TensorFlow 1.0
        tf.global_variables_initializer().run()
        for i in range(NUM_EPOCH):
            print("In iteration: %d" % (i + 1))
            run_epoch(session, train_model, train_data, train_model.train_op, True)
            valid_perplexity = run_epoch(session, eval_model, valid_data, tf.no_op(), False)
            print("Epoch: %d Validation Perplexity: %.3f" % (i + 1, valid_perplexity))
        test_perplexity = run_epoch(session, eval_model, test_data, tf.no_op(), False)
        print("Test Perplexity: %.3f" % test_perplexity)

if __name__ == "__main__":
    tf.app.run()
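
The perplexity reported above is simply the exponential of the average per-word cross-entropy. A minimal standalone sketch of the computation run_epoch performs (the numbers are made up, not real training output):

import numpy as np

def perplexity(total_costs, iters):
    # total_costs: summed per-batch cross-entropy (already divided by batch_size)
    # iters: number of predicted time steps
    return np.exp(total_costs / iters)

print(perplexity(total_costs=140.0, iters=20))  # exp(7.0) ≈ 1096.6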

The author used TensorFlow 1.0, and the code does not run unchanged on current releases; fixes for most of the resulting errors can be found in this blog post: https://blog.csdn.net/White_Idiot/article/details/78881261

For the error "module 'reader' has no attribute 'ptb_iterator'", however, I added the source of ptb_iterator() to the reader file myself (note that reader.py must import numpy as np for it to work). The code is as follows:

def ptb_iterator(raw_data, batch_size, num_steps):
  """Iterate on the raw PTB data.
  This generates batch_size pointers into the raw PTB data, and allows
  minibatch iteration along these pointers.
  Args:
    raw_data: one of the raw data outputs from ptb_raw_data.
    batch_size: int, the batch size.
    num_steps: int, the number of unrolls.
  Yields:
    Pairs of the batched data, each a matrix of shape [batch_size, num_steps].
    The second element of the tuple is the same data time-shifted to the
    right by one.
  Raises:
    ValueError: if batch_size or num_steps are too high.
  """
  raw_data = np.array(raw_data, dtype=np.int32)

  data_len = len(raw_data)
  batch_len = data_len // batch_size
  data = np.zeros([batch_size, batch_len], dtype=np.int32)
  for i in range(batch_size):
    data[i] = raw_data[batch_len * i:batch_len * (i + 1)]

  epoch_size = (batch_len - 1) // num_steps

  if epoch_size == 0:
    raise ValueError("epoch_size == 0, decrease batch_size or num_steps")

  for i in range(epoch_size):
    x = data[:, i*num_steps:(i+1)*num_steps]
    y = data[:, i*num_steps+1:(i+1)*num_steps+1]
    yield (x, y)
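
As a quick sanity check of the iterator (using a made-up word-id sequence rather than the PTB data), each yielded pair is a [batch_size, num_steps] matrix, with y being x time-shifted to the right by one:

raw_data = list(range(20))  # stand-in for a sequence of word ids
for step, (x, y) in enumerate(ptb_iterator(raw_data, batch_size=2, num_steps=3)):
    print("step %d" % step)
    print(x)  # e.g. [[0 1 2], [10 11 12]] on the first step
    print(y)  # e.g. [[1 2 3], [11 12 13]] -- the next word at each position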
