Dissecting the Source Code of a Recurrent Neural Network

In deep learning, the RNN (Recurrent Neural Network) is a network built around a recurrent hidden state; this post dissects the source code of a minimal character-level RNN trained on a plain-text file.
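Before diving into the code, it helps to keep the forward recurrence in mind. In the notation of the code variables Wxh, Whh, Why, bh and by, each time step t computes

$$h_t = \tanh(W_{xh}\,x_t + W_{hh}\,h_{t-1} + b_h), \qquad y_t = W_{hy}\,h_t + b_y, \qquad p_t = \mathrm{softmax}(y_t),$$

where x_t is the one-hot encoding of the character at step t and p_t is the predicted probability distribution over the next character.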
# -*- coding: utf-8 -*-

import numpy as np
import codecs

# Read the entire contents of the txt file into a single string
data = open('text.txt', 'r').read()
# Deduplicate the characters to build the vocabulary
chars = list(set(data))
print chars
# Count the total number of characters in the source file and the number of unique characters
data_size, vocab_size = len(data), len(chars)
print 'data has %d characters, %d unique.' % (data_size, vocab_size)
# Build lookup tables between characters and integer indices
char_to_ix={ ch:i for i,ch in enumerate(chars) }
ix_to_char={ i:ch for i,ch in enumerate(chars) }
print char_to_ix
# Number of hidden-layer neurons
hidden_size = 100
# Number of characters unrolled in each training step (the truncated BPTT window)
seq_length = 20
# Learning rate
learning_rate = 1e-1

## Model parameters ##
# Input-to-hidden weights
Wxh=np.random.randn(hidden_size,vocab_size)*0.01
# Hidden-to-hidden (recurrent) weights
Whh=np.random.randn(hidden_size,hidden_size)*0.01
# Hidden-to-output weights; the output layer predicts a probability for every character
Why=np.random.randn(vocab_size,hidden_size)*0.01
# Hidden-layer bias
bh = np.zeros((hidden_size, 1))
# Output-layer bias
by = np.zeros((vocab_size, 1))
# inputs  - sequence of character indices at time t (the inputs)
# targets - sequence of character indices at time t+1 (the expected outputs)
# hprev   - hidden-layer activations at time t-1 (the initial hidden state)
def lossFun(inputs, targets, hprev):
  xs, hs, ys, ps = {}, {}, {}, {}
  hs[-1] = np.copy(hprev)
  loss = 0
  # Forward pass
  for t in xrange(len(inputs)):
    # One-hot encode the input: every position is 0 except the index of the current character
    xs[t] = np.zeros((vocab_size,1))
    xs[t][inputs[t]] = 1
    # Compute the RNN hidden-layer activations
    hs[t] = np.tanh(np.dot(Wxh, xs[t]) 
            + np.dot(Whh, hs[t-1]) + bh)
    # Compute the RNN output scores (unnormalized)
    ys[t] = np.dot(Why, hs[t]) + by
    # Normalize the scores into probabilities (softmax)
    ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))
    # Softmax cross-entropy loss
    loss += -np.log(ps[t][targets[t],0])
  # Backward pass (backpropagation through time)
  dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
  dbh, dby = np.zeros_like(bh), np.zeros_like(by)
  dhnext = np.zeros_like(hs[0])
  for t in reversed(xrange(len(inputs))):
    dy = np.copy(ps[t])
    # backprop into y
    dy[targets[t]] -= 1
    dWhy += np.dot(dy, hs[t].T)
    dby += dy
    # backprop into h
    dh = np.dot(Why.T, dy) + dhnext
    # backprop through tanh nonlinearity
    dhraw = (1 - hs[t] * hs[t]) * dh 
    dbh += dhraw
    dWxh += np.dot(dhraw, xs[t].T)
    dWhh += np.dot(dhraw, hs[t-1].T)
    dhnext = np.dot(Whh.T, dhraw)
  for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
    # clip to mitigate exploding gradients
    np.clip(dparam, -5, 5, out=dparam)
  return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
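The backward pass starts from the softmax cross-entropy loss. For a single time step with predicted distribution $p_t$ and a one-hot target, the gradient of the loss with respect to the unnormalized scores $y_t$ is

$$\frac{\partial L_t}{\partial y_t} = p_t - \mathbf{1}_{target_t},$$

which is exactly why lossFun copies ps[t] into dy and subtracts 1 at the target index. The dhnext variable carries the gradient flowing back from the next time step through the recurrent weights Whh, and the final np.clip keeps the accumulated gradients in [-5, 5] to mitigate exploding gradients.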

# Sampling function, used to inspect the model: given seed_ix, the character index at t=0, generate the following n characters
def sample(h, seed_ix, n):
  x = np.zeros((vocab_size, 1))
  x[seed_ix] = 1
  ixes = []
  for t in xrange(n):
    # h is updated recurrently from one step to the next
    h = np.tanh(np.dot(Wxh, x)+np.dot(Whh, h)+bh)
    y = np.dot(Why, h) + by
    p = np.exp(y) / np.sum(np.exp(y))
    # Sample the next character index according to the predicted probabilities
    ix = np.random.choice(range(vocab_size), 
                                p=p.ravel())
    # Re-encode the sampled character as the next input vector
    x = np.zeros((vocab_size, 1))
    x[ix] = 1
    # Record the sampled index
    ixes.append(ix)
  return ixes

# n: iteration counter; p: position of the data pointer
n, p = 0, 0
# Memory variables for Adagrad
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by)
# Smoothed loss, initialized to the loss of a uniform prediction at iteration 0
smooth_loss = -np.log(1.0/vocab_size)*seq_length
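# (Worked example: with a hypothetical vocabulary of 65 unique characters, the
#  initial smoothed loss would be -ln(1/65) * 20 ≈ 83.5; the actual value
#  depends on the vocab_size of your text.txt.)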

while n<20000:
  # n is the number of training iterations. When the input starts at t=0, the hidden-layer activations of the previous time step are set to zero
  if p+seq_length+1 >= len(data) or n == 0: 
    hprev = np.zeros((hidden_size,1)) # reset RNN memory
    # go from start of data
    p = 0
  # Inputs and targets for the current chunk
  inputs = [char_to_ix[ch] for ch in data[p:p+seq_length]]
  targets = [char_to_ix[ch] for ch in data[p+1:p+seq_length+1]]
  # Every 1000 iterations, sample from the model and print the generated text to check progress
  if n % 1000 == 0:
    sample_ix = sample(hprev, inputs[0], 200)
    txt = ''.join(ix_to_char[ix] for ix in sample_ix)
    print '----\n %s \n----' % (txt, )
    
  # RNN forward pass and backpropagation, returning the loss and gradients
  loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
  smooth_loss = smooth_loss*0.999+loss*0.001
  # print progress
  if n % 100 == 0: print 'iter %d, loss: %f' % (n, smooth_loss)

  # Parameter update with Adagrad (adaptive gradient descent); see also:
  # http://blog.csdn.net/danieljianfeng/article/details/42931721
  for param, dparam, mem in zip(
      [Wxh, Whh, Why, bh, by], 
      [dWxh, dWhh, dWhy, dbh, dby], 
      [mWxh, mWhh, mWhy, mbh, mby]):
    mem += dparam * dparam
    # Adagrad update: scale the step size by the accumulated squared gradients
    param += -learning_rate * dparam / np.sqrt(mem + 1e-8) 
  p += seq_length # move the data pointer to the next chunk
  n += 1 # count the iteration
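
A useful sanity check for the analytic gradients returned by lossFun is a numerical gradient check. The sketch below is not part of the original script; gradCheck is a hypothetical helper that perturbs a few randomly chosen entries of each parameter and compares the centered finite-difference estimate against the analytic gradient. Because lossFun clips its gradients to [-5, 5], the check is only meaningful while the true gradients are small, e.g. right after initialization.

# Numerical gradient check (illustrative sketch, not part of the original post)
def gradCheck(inputs, targets, hprev, num_checks=5, delta=1e-5):
  _, dWxh, dWhh, dWhy, dbh, dby, _ = lossFun(inputs, targets, hprev)
  for param, dparam, name in zip([Wxh, Whh, Why, bh, by],
                                 [dWxh, dWhh, dWhy, dbh, dby],
                                 ['Wxh', 'Whh', 'Why', 'bh', 'by']):
    for i in xrange(num_checks):
      # pick a random entry of the parameter and perturb it in both directions
      ri = int(np.random.randint(param.size))
      old_val = param.flat[ri]
      param.flat[ri] = old_val + delta
      loss_plus = lossFun(inputs, targets, hprev)[0]
      param.flat[ri] = old_val - delta
      loss_minus = lossFun(inputs, targets, hprev)[0]
      param.flat[ri] = old_val  # restore the original value
      # centered finite-difference estimate vs. analytic gradient
      grad_numerical = (loss_plus - loss_minus) / (2 * delta)
      grad_analytic = dparam.flat[ri]
      rel_error = abs(grad_analytic - grad_numerical) / max(abs(grad_analytic) + abs(grad_numerical), 1e-12)
      # relative errors around 1e-7 or smaller indicate the backprop is correct
      print '%s: numerical %f, analytic %f, relative error %e' % (name, grad_numerical, grad_analytic, rel_error)

# Example call (hypothetical), using the first chunk of the data:
# gradCheck([char_to_ix[ch] for ch in data[:seq_length]],
#           [char_to_ix[ch] for ch in data[1:seq_length+1]],
#           np.zeros((hidden_size, 1)))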
