from tensorflow import keras
import numpy as np
from tensorflow.keras import layers
import random
import sys
# Original corpus from the Keras example (for reference):
#   https://s3.amazonaws.com/text-datasets/nietzsche.txt
# NOTE(review): origin='.keras.datasets' is not a valid URL — this only
# works if 'real_test.txt' already sits in the local Keras cache
# (~/.keras/datasets). Confirm the intended origin.
path = keras.utils.get_file('real_test.txt', origin='.keras.datasets')
# Context manager closes the handle deterministically (the original left
# the file open); lowercase to shrink the character vocabulary.
with open(path, encoding='utf-8') as f:
    text = f.read().lower()
print('Corpus length:', len(text))
# Slice the corpus into overlapping fixed-length windows: each training
# sample is `maxlen` characters, its target is the character right after.
maxlen = 60  # characters per input sequence
step = 3     # stride between consecutive windows

window_starts = range(0, len(text) - maxlen, step)
sentences = [text[start:start + maxlen] for start in window_starts]
next_chars = [text[start + maxlen] for start in window_starts]
print('Number of sequences:', len(sentences))
# Build the character vocabulary and one-hot encode inputs and targets.
chars = sorted(set(text))
print('Unique characters:', len(chars))
# Map each unique character to its index in `chars`.
# enumerate() makes this O(n); the original chars.index(char) per char
# was O(n^2) in the vocabulary size.
char_indices = {char: idx for idx, char in enumerate(chars)}
print('Vectorization...')
print(len(sentences), maxlen, len(chars))
# x[i, t, c] = 1 when character c appears at position t of sentence i;
# y[i, c]    = 1 when character c immediately follows sentence i.
# Builtin `bool` is used: the `np.bool` alias was removed in NumPy 1.24
# and raises AttributeError on modern NumPy.
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)
y = np.zeros((len(sentences), len(chars)), dtype=bool)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        x[i, t, char_indices[char]] = 1
    # The target for row i is the next character after the window.
    y[i, char_indices[next_chars[i]]] = 1
# Single LSTM layer over one-hot characters, softmax over the vocabulary.
model = keras.models.Sequential()
model.add(layers.LSTM(128, input_shape=(maxlen, len(chars))))
model.add(layers.Dense(len(chars), activation='softmax'))
# Targets are one-hot encoded, hence categorical_crossentropy.
# `learning_rate` replaces the `lr` argument, which is deprecated in
# TF2 Keras and removed in recent releases.
optimizer = keras.optimizers.RMSprop(learning_rate=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def sample(preds, temperature=1.0):
    """Draw the index of the next character from the distribution `preds`.

    The model's softmax output is reweighted by `temperature` (lower
    values sharpen the distribution toward the argmax, higher values
    flatten it toward uniform), then one index is drawn from the
    resulting multinomial distribution.
    """
    logits = np.log(np.asarray(preds, dtype='float64')) / temperature
    weights = np.exp(logits)
    distribution = weights / weights.sum()
    draw = np.random.multinomial(1, distribution, 1)
    return np.argmax(draw)
# Alternate one epoch of training with text generation, so output quality
# can be inspected as the model improves.
for epoch in range(1, 60):
    print('epoch:', epoch)
    model.fit(x, y, batch_size=128, epochs=1)
    # Seed generation with a random slice of the corpus.
    start_index = random.randint(0, len(text) - maxlen - 1)
    generated_text = text[start_index: start_index + maxlen]
    print('---Generating with seed:"' + generated_text + '"')
    for temperature in [0.5, 0.6]:
        print('------temperature:', temperature)
        sys.stdout.write(generated_text)
        # Emit 400 characters, feeding each prediction back as input.
        for _ in range(400):
            # One-hot encode the current window for the model.
            sampled = np.zeros((1, maxlen, len(chars)))
            for t, char in enumerate(generated_text):
                sampled[0, t, char_indices[char]] = 1.
            preds = model.predict(sampled, verbose=0)[0]
            next_index = sample(preds, temperature)
            next_char = chars[next_index]
            # Slide the window: drop the first char, append the new one.
            generated_text = generated_text[1:] + next_char
            sys.stdout.write(next_char)
# NOTE: the trailing notes below were plain text and made the script a
# SyntaxError; kept here as comments, with English translations.
# 复现代码,笔记已经记在注释中了。
#   (Reproduced the code; my notes are recorded in the comments.)
# 其中:
#   (Among them:)
# 学校让写的那个学习强国总结,用keras.lstm自动生成文章实现了以下,效果如下:
#   (For the school-assigned "Xuexi Qiangguo" summary, I tried auto-generating
#   the article with a Keras LSTM; the results are as follows:)
# 今天把深度学习四大名著之一的python深度学习看完了,终于又迈进去一根脚趾头。
#   (Today I finished "Deep Learning with Python", one of the four classics of
#   deep learning — finally got another toe in the door.)