import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline  -- Jupyter-only magic; commented out so the file parses as a plain Python script
# Load Fashion-MNIST: 60k training and 10k test grayscale 28x28 images,
# with integer class labels in [0, 9].
(train_image, train_label), (test_image, test_label) = tf.keras.datasets.fashion_mnist.load_data()
# Normalize pixel values from [0, 255] to [0, 1].
train_image = train_image / 255
test_image = test_image / 255

# Build the training pipeline with tf.data: slice into per-example datasets,
# zip images with labels into (image, label) tuples, then shuffle, repeat
# indefinitely, and batch.
ds_train_image = tf.data.Dataset.from_tensor_slices(train_image)
ds_train_label = tf.data.Dataset.from_tensor_slices(train_label)
ds_train = tf.data.Dataset.zip((ds_train_image, ds_train_label))
ds_train = ds_train.shuffle(10000).repeat().batch(64)  # shuffle buffer of 10k, batches of 64

# BUG FIX: the test pipeline previously sliced train_image/train_label,
# so "validation" was actually evaluated on the training data. Use the
# held-out test split instead.
ds_test_image = tf.data.Dataset.from_tensor_slices(test_image)
ds_test_label = tf.data.Dataset.from_tensor_slices(test_label)
ds_test = tf.data.Dataset.zip((ds_test_image, ds_test_label))
ds_test = ds_test.batch(64)  # evaluation needs no shuffling or repeating, only batching
# Simple fully-connected classifier: flatten -> two 128-unit ReLU layers
# with dropout -> 10-way softmax over the Fashion-MNIST classes.
model = tf.keras.Sequential()
# BUG FIX: the trailing note on this line was plain (uncommented) text in the
# original source, which is a SyntaxError; it is now a proper comment.
model.add(tf.keras.layers.Flatten(input_shape=(28, 28)))  # input is a 28x28 image
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))  # dropout to reduce overfitting
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
# Labels are integer class ids (not one-hot), so use the sparse categorical loss.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
# ds_train repeats forever, so fit() needs an explicit steps_per_epoch to know
# where an epoch ends: with batches of 64, one pass over the training set is
# len(train_image) // 64 steps.
steps_per_epochs = len(train_image) // 64
history = model.fit(
    ds_train,
    epochs=5,
    steps_per_epoch=steps_per_epochs,
    validation_data=ds_test,
)
# Notes: TensorFlow 2.0 basics — reworking the MNIST multi-class task with tf.data.
# (Blog-page residue, incl. a publish date of 2024-08-09 10:59:47; kept as comments so the file parses.)