# Written in a Jupyter notebook (jupyter notebook 上编写)
from tensorflow.python.keras.datasets import imdb
import numpy as np
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
# num_words=10000: keep only the 10,000 most frequent words, so word indices stay below 10000
print(train_data.shape, '\n')
print(train_data[0], end='')# word indices of the first review
print('\n')
print(train_labels[0])
print('\n')
print(max([max(sequence) for sequence in train_data])) # the highest word index is only 9999
"""准备数据:
不能将整数序列直接输入神经网络,需要将列表转化为张量。
(1)填充列表,使其具有相同的长度,再将列表转化为形状为 (samples, word_indices) 的张量
(2)one-hot编码,将列表转化为0,1组成的向量
这里采用(2)"""
import numpy as np
def vectorize_sequences(sequences, dimension=10000, dtype=float):
    """One-hot encode integer sequences into a 2-D indicator array.

    Parameters
    ----------
    sequences : iterable of iterables of int
        Each inner sequence holds word indices in [0, dimension).
    dimension : int, optional
        Width of each output vector (vocabulary size). Default 10000.
    dtype : data-type, optional
        dtype of the returned array. Defaults to ``float`` (float64),
        matching ``np.zeros``'s default so existing callers are unchanged.

    Returns
    -------
    numpy.ndarray of shape (len(sequences), dimension)
        ``results[i, j] == 1`` iff index ``j`` appears in ``sequences[i]``.
    """
    results = np.zeros((len(sequences), dimension), dtype=dtype)
    for i, sequence in enumerate(sequences):
        # Fancy indexing sets every listed column of row i to 1 in one shot;
        # duplicate indices within a sequence are harmless (slot is set to 1 again).
        results[i, sequence] = 1
    return results
x_train = vectorize_sequences(train_data) # vectorize the training data
x_test = vectorize_sequences(test_data) # vectorize the test data
x_train[0] # now one-hot encoded
"""标签向量化,标签值本来就是0,1"""
# Labels are already 0/1; just convert them to float32 vectors
t_train = np.asarray(train_labels).astype('float32')
t_test = np.asarray(test_labels).astype('float32')
# the data can now be fed into the neural network
"""构建网络"""
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000, )))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid')) # 输出一个0~1的概率
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
# 配置自定义的优化器 model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss='binary_crossentropy', metrics=['accuracy'])
# model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss=losses.binary_crossentropy, metrics=[metrics.binary_accuracy])
"""留出验证集:
需要将原始训练数据留出10000个样本作为验证集"""
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
t_val = t_train[:10000]
partial_t_train = t_train[10000:]
"""训练模型
history是一个字典"""
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(partial_x_train,
partial_t_train,
epochs=20,
batch_size=512,
validation_data=(x_val, t_val)) # x_val训练集,t_val验证集
history_dict = history.history
print(history_dict.keys())
"""绘制训练损失和验证损失"""
import matplotlib.pyplot as plt
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'bo', label='Training loss') # 'bo'代表蓝色圆圈
plt.plot(epochs, val_loss_values, 'b', label='Validation loss') # 'b'代表蓝色实线
plt.xlabel('epochs')
plt.ylabel('loss')
plt.title('训练损失和验证损失')
plt.legend()
plt.show()
# Overfitting: validation loss starts rising while training loss keeps falling
"""绘制训练精度和验证精度"""
# Plot training vs. validation accuracy per epoch (keys match metrics=['acc'])
acc = history_dict['acc']
val_acc = history_dict['val_acc']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
# Accuracy shows the same overfitting pattern
"""使用训练好的网络在新数据上生成预测结果"""
# Predicted probabilities for the test set
model.predict(x_test)
# Some predictions are very confident (near 0 or 1), others much less so