Generating lyrics with a multi-layer LSTM

import os
import sys
import time

import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.contrib import rnn as rnn_cell
from tensorflow.contrib import legacy_seq2seq as seq2seq
import random

class HParam():

    batch_size = 32
    n_epoch = 100
    learning_rate = 0.01
    decay_steps = 1000
    decay_rate = 0.9
    grad_clip = 5

    state_size = 100
    num_layers = 3
    seq_length = 20
    log_dir = './logs'
    metadata = 'metadata.tsv'
    gen_num = 500 # how many chars to generate


class DataGenerator():

    def __init__(self, datafiles, args):
        self.seq_length = args.seq_length
        self.batch_size = args.batch_size
        with open(datafiles, encoding='utf-8') as f:
            self.data = f.read()

        self.total_len = len(self.data)  # total data length
        self.words = list(set(self.data))
        self.words.sort()
        # vocabulary
        self.vocab_size = len(self.words)  # vocabulary size
        print('Vocabulary Size: ', self.vocab_size)
        self.char2id_dict = {w: i for i, w in enumerate(self.words)}
        self.id2char_dict = {i: w for i, w in enumerate(self.words)}

        # pointer position to generate current batch
        self._pointer = 0

        # save metadata file
        self.save_metadata(args.metadata)



    def char2id(self, c):
        return self.char2id_dict[c]

    def id2char(self, id):
        return self.id2char_dict[id]

    def save_metadata(self, file):
        with open(file, 'w') as f:
            f.write('id\tchar\n')
            for i in range(self.vocab_size):
                c = self.id2char(i)
                f.write('{}\t{}\n'.format(i, c))

    def next_batch(self):

        x_batches = []
        y_batches = []
        for i in range(self.batch_size):
            if self._pointer + self.seq_length + 1 >= self.total_len:
                self._pointer = 0
            bx = self.data[self._pointer: self._pointer + self.seq_length]
            by = self.data[self._pointer +
                           1: self._pointer + self.seq_length + 1]
            self._pointer += self.seq_length  # update pointer position

            # convert to ids
            bx = [self.char2id(c) for c in bx]
            by = [self.char2id(c) for c in by]



            x_batches.append(bx)
            y_batches.append(by)
        # print("维度:",x_batches)
        #     shuffle_indices = np.random.permutation(np.arange(len(x_batches)))
        #     x_batches = x_batches[shuffle_indices]
        #     y_batches = y_batches[shuffle_indices]
        return x_batches, y_batches


class Model():
    """
    The core recurrent neural network model.
    """

    def __init__(self, args, data, infer=False):
        if infer:
            args.batch_size = 1
            args.seq_length = 1
        with tf.name_scope('inputs'):
            self.input_data = tf.placeholder(
                tf.int32, [args.batch_size, args.seq_length])
            self.target_data = tf.placeholder(
                tf.int32, [args.batch_size, args.seq_length])

        with tf.name_scope('model'):
            # One BasicLSTMCell per layer; reusing a single cell object breaks
            # on TF >= 1.1 because the layers would try to share variables.
            self.cell = rnn_cell.MultiRNNCell(
                [rnn_cell.BasicLSTMCell(args.state_size) for _ in range(args.num_layers)])
            self.initial_state = self.cell.zero_state(
                args.batch_size, tf.float32)
            with tf.variable_scope('rnnlm'):
                w = tf.get_variable(
                    'softmax_w', [args.state_size, data.vocab_size])
                b = tf.get_variable('softmax_b', [data.vocab_size])
                with tf.device("/cpu:0"):
                    embedding = tf.get_variable(
                        'embedding', [data.vocab_size, args.state_size])
                    inputs = tf.nn.embedding_lookup(embedding, self.input_data)
            outputs, last_state = tf.nn.dynamic_rnn(
                self.cell, inputs, initial_state=self.initial_state)
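            # outputs: [batch_size, seq_length, state_size] (dynamic_rnn is
            # batch-major by default); last_state is the final state per layer.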

        with tf.name_scope('loss'):
            output = tf.reshape(outputs, [-1, args.state_size])

            self.logits = tf.matmul(output, w) + b
            self.probs = tf.nn.softmax(self.logits)
            self.last_state = last_state

            targets = tf.reshape(self.target_data, [-1])
            self.targets = targets
            # per-timestep cross-entropy with unit weights, shape [batch_size * seq_length]
            loss = seq2seq.sequence_loss_by_example([self.logits],
                                                    [targets],
                                                    [tf.ones_like(targets, dtype=tf.float32)])
            self.loss = loss
            self.cost = tf.reduce_sum(loss) / args.batch_size
            tf.summary.scalar('loss', self.cost)

        with tf.name_scope('optimize'):
            self.lr = tf.placeholder(tf.float32, [])
            tf.summary.scalar('learning_rate', self.lr)

            optimizer = tf.train.AdamOptimizer(self.lr)
            tvars = tf.trainable_variables()
            grads = tf.gradients(self.cost, tvars)
            for g in grads:
                tf.summary.histogram(g.name, g)
            grads, _ = tf.clip_by_global_norm(grads, args.grad_clip)

            self.train_op = optimizer.apply_gradients(zip(grads, tvars))
            self.merged_op = tf.summary.merge_all()


def train(data, model, args):
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        writer = tf.summary.FileWriter(args.log_dir, sess.graph)

        # Add embedding tensorboard visualization. Need tensorflow version
        # >= 0.12.0RC0
        config = projector.ProjectorConfig()
        embed = config.embeddings.add()
        embed.tensor_name = 'rnnlm/embedding:0'
        embed.metadata_path = args.metadata
        projector.visualize_embeddings(writer, config)

        max_iter = args.n_epoch * \
            (data.total_len // args.seq_length) // args.batch_size

        for i in range(max_iter):

            learning_rate = args.learning_rate * \
                (args.decay_rate ** (i // args.decay_steps))
            x_batch, y_batch = data.next_batch()
            feed_dict = {model.input_data: x_batch,
                         model.target_data: y_batch, model.lr: learning_rate}

            train_loss, summary, _, _, _ = sess.run(
                [model.cost, model.merged_op, model.loss, model.last_state, model.train_op],
                feed_dict)

            if i % 10 == 0:
                writer.add_summary(summary, global_step=i)
                print('Step:{}/{}, training_loss:{:.4f}'.format(i,
                                                                 max_iter, train_loss))
            if i % 2000 == 0 or (i + 1) == max_iter:
                saver.save(sess, os.path.join(
                    args.log_dir, 'lyrics_model.ckpt'), global_step=i)


def sample(data, model, args):
    saver = tf.train.Saver()
    with tf.Session() as sess:
        ckpt = tf.train.latest_checkpoint(args.log_dir)
        print(ckpt)
        saver.restore(sess, ckpt)

        # initial phrase to warm RNN
        prime = u'小琴喜欢'
        state = sess.run(model.cell.zero_state(1, tf.float32))

        for word in prime[:-1]:
            x = np.zeros((1, 1))
            x[0, 0] = data.char2id(word)
            feed = {model.input_data: x, model.initial_state: state}
            state = sess.run(model.last_state, feed)
            print("state:",len(state),len(state[0]))

        word = prime[-1]
        lyrics = prime
        for i in range(args.gen_num):
            x = np.zeros([1, 1])
            x[0, 0] = data.char2id(word)
            feed_dict = {model.input_data: x, model.initial_state: state}
            probs, state = sess.run([model.probs, model.last_state], feed_dict)
            p = probs[0]
            word = data.id2char(np.argmax(p))
            print(word, end='')
            sys.stdout.flush()
            time.sleep(0.05)
            lyrics += word
        return lyrics


def main(infer):

    args = HParam()
    data = DataGenerator('JayLyrics.txt', args)
    model = Model(args, data, infer=infer)

    run_fn = sample if infer else train

    run_fn(data, model, args)


if __name__ == '__main__':
    msg = """
    Usage:
    Training: 
        python3 gen_lyrics.py 0
    Sampling:
        python3 gen_lyrics.py 1
    """
    if len(sys.argv) == 2:
        infer = int(sys.argv[-1])
        print('--Sampling--' if infer else '--Training--')
        main(infer)
    else:
        print(msg)
        sys.exit(1)
'''
1. A small pitfall caused by upgrading TensorFlow:
    # from tensorflow.contrib.rnn import core_rnn_cell as rnn_cell   (old import path)
    from tensorflow.contrib import rnn as rnn_cell

2. The mode is chosen from the command line. There are two modes:
    pass 0 to train
    pass 1 to generate text
3. Program flow in training mode:

    HParam is read first and supplies the hyperparameters
    (the learning-rate schedule built from decay_steps / decay_rate is sketched
    right after this list):
        batch_size = 32
        n_epoch = 100
        learning_rate = 0.01
        decay_steps = 1000
        decay_rate = 0.9
        grad_clip = 5

        state_size = 100
        num_layers = 3
        seq_length = 20
        log_dir = './logs'
        metadata = 'metadata.tsv'
        gen_num = 500  # how many chars to generate
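
    train() implements this staircase exponential decay by hand rather than
    with a TF op; a quick sketch of the first few values under the numbers above:

        # learning_rate * decay_rate ** (step // decay_steps), as computed in train()
        for step in (0, 1000, 2000, 5000):
            lr = 0.01 * 0.9 ** (step // 1000)
            print(step, round(lr, 6))   # 0.01, 0.009, 0.0081, 0.005905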
4. Data preparation (DataGenerator):
    sequence length 20, batch_size 32
    total_len: total number of characters in the corpus
    the unique characters are collected into a list and sorted
    that list is the vocabulary; the char2id and id2char lookup dicts are built from it
    the vocabulary is exported to the metadata file
    (a minimal usage sketch follows right below)
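
    A minimal usage sketch of this data pipeline; it assumes the JayLyrics.txt
    corpus used by main() is present, and the sizes follow HParam:

        args = HParam()
        data = DataGenerator('JayLyrics.txt', args)        # builds the vocab, writes metadata.tsv
        x, y = data.next_batch()
        print(len(x), len(x[0]))                           # 32 sequences of 20 character ids
        print(data.id2char(data.char2id(data.words[0])))   # char <-> id round trip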

5. Building the model (shapes below assume training mode; in infer mode
   batch_size and seq_length are both forced to 1):

    input_data:  [batch_size, seq_length] = [32, 20]
    target_data: [32, 20]

    model
        3 layers of BasicLSTMCell(100)
        initial state: the zero state for a batch of 32

        softmax_w: [100, vocab_size]
        softmax_b: [vocab_size]

    embedding: [vocab_size, 100]
    inputs:    [32, 20, 100]

        the zero initial state has batch_size entries;
        because the LSTM cell size is 100, the last dimension of the outputs is 100
    outputs: [32, 20, 100]   (dynamic_rnn is batch-major by default)



    loss
        output:  [640, 100]            (outputs reshaped to [-1, state_size], 32 * 20 = 640)
        logits:  [640, 100] x [100, vocab_size] + b  ->  [640, vocab_size]

        probs:   softmax of the logits, same shape
        last_state: the second return value of dynamic_rnn

        targets: [640]
        the loss compares logits [640, vocab_size] with targets [640],
        weighted by tf.ones_like(targets) -> a [640] vector of per-timestep cross-entropies
        (a numeric sketch of this computation follows right after this item)

        cost: reduce_sum of the loss divided by batch_size (32), a scalar

        summary.scalar('loss', cost)
        summaries are buffered and flushed to disk by the FileWriter

    optimize:
        Adam with the decayed learning rate fed in through the lr placeholder;
        gradients are clipped by global norm to grad_clip = 5
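
    A rough numeric sketch of what sequence_loss_by_example computes here,
    written as a hypothetical NumPy helper (not part of the script);
    logits is [640, vocab_size], targets is [640]:

        import numpy as np

        def per_timestep_xent(logits, targets):
            # stable log-softmax, then pick out the log-probability of each target id
            shifted = logits - logits.max(axis=1, keepdims=True)
            log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
            return -log_probs[np.arange(len(targets)), targets]   # shape [640]

        # the model's cost is per_timestep_xent(logits, targets).sum() / batch_size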



Sampling (inference):

    the model is rebuilt with
        batch_size = 1
        seq_length = 1
    input:  [1, 1]
    target: [1, 1]

    model: BasicLSTMCell(100), three layers
        initial_state: zero state for batch_size = 1
    softmax_w: [100, 1024]   (1024 stands for vocab_size here)
    softmax_b: [1024]
    inputs:  [1, 1, 100]
    outputs: [1, 1, 100]

    loss path:
        output reshaped to [1, 100]
        logits: [1, 100] x [100, 1024] + b  ->  [1, 1024]
        probs:  [1, 1024]


		
    get the latest checkpoint path under log_dir and restore it
    into the inference graph (batch_size = 1, seq_length = 1)
    state starts as the zero state for a batch of 1

    walk through the priming phrase, from the first character up to the
    second-to-last one:
        x is a 1x1 array holding the current character id
        feed = {input_data: x, initial_state: state}
        running the model only updates state (this warms up the RNN on the prime)
    then, starting from the last priming character, repeatedly feed the previous
    character together with last_state to predict the next character
    (a small sampling variant is sketched right after these notes).'''
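
# sample() always picks the argmax of probs, which tends to repeat the most
# likely characters. A small hedged alternative (not in the original script) is
# to draw the next id from the predicted distribution, reusing p = probs[0] and
# data from sample():
#
#     p = p / p.sum()                        # re-normalize against float drift
#     next_id = np.random.choice(len(p), p=p)
#     word = data.id2char(next_id)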
