Implementation approach
- Import the required packages
- Download/load the dataset (MNIST here)
- Preprocess the dataset (convert to tensors, one-hot encode the labels, split into batches)
- Define the network structure (number of layers, activation functions, how the layers are connected)
- Optimize the parameters with stochastic gradient descent (SGD)
- Minimize the MSE loss via gradient descent, and replace the old parameters with the updated ones (see the formulas after this list)
- Repeat the training for multiple epochs
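For reference, with batch size N = 200 and learning rate η = 0.001 as in the code below, the MSE loss over a batch and the SGD parameter update are

    L(θ) = (1/N) · Σᵢ ‖outᵢ − yᵢ‖²,        θ ← θ − η · ∇θ L(θ)

where outᵢ is the network output for the i-th image and yᵢ its one-hot label.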
Code
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow info/warning logs
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers, datasets

# Let TensorFlow allocate GPU memory on demand instead of grabbing it all at once
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
# Load MNIST: 60k training images (28x28) plus a 10k validation split
(x, y), (x_val, y_val) = datasets.mnist.load_data()
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.  # scale pixels to [0, 1]
y = tf.convert_to_tensor(y, dtype=tf.int32)
y = tf.one_hot(y, depth=10)                           # one-hot encode the labels
print(x.shape, y.shape)
train_dataset = tf.data.Dataset.from_tensor_slices((x, y))
train_dataset = train_dataset.batch(200)              # batch size 200
# Three fully connected layers; the last one outputs a score per digit class
model = keras.Sequential([
    layers.Dense(512, activation='relu'),
    layers.Dense(256, activation='relu'),
    layers.Dense(10)
])
optimizer = optimizers.SGD(learning_rate=0.001)  # renamed so it does not shadow the optimizers module
def train(epoch):
    for step, (x, y) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            x = tf.reshape(x, (-1, 28*28))  # flatten each image to a 784-vector
            out = model(x)
            # MSE loss: per-sample sum of squared errors, averaged over the batch
            loss = tf.reduce_sum(tf.square(out - y)) / x.shape[0]
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        if step % 100 == 0:
            print(epoch, step, 'loss is', loss.numpy())
def main():
    for epoch in range(30):
        train(epoch)

main()
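The validation split (x_val, y_val) is loaded but never used above. As a minimal sketch (not part of the original code), accuracy on it could be checked after main() returns, assuming the same preprocessing as the training data:

# Sketch: evaluate accuracy on the held-out split after training
x_val_f = tf.reshape(tf.convert_to_tensor(x_val, dtype=tf.float32) / 255., (-1, 28*28))
logits = model(x_val_f)                                 # raw class scores
pred = tf.argmax(logits, axis=1)                        # predicted digit per image
labels = tf.cast(tf.convert_to_tensor(y_val), tf.int64)
acc = tf.reduce_mean(tf.cast(tf.equal(pred, labels), tf.float32))
print('val acc:', acc.numpy())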