import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, layers, optimizers, Sequential
def preprocess(x, y):
    """Map a (image, label) pair to training dtypes.

    Images become float32 scaled to [0, 1]; labels become int32.
    """
    images = tf.cast(x, tf.float32) / 255.
    labels = tf.cast(y, tf.int32)
    return images, labels
# Mini-batch size shared by the training and evaluation pipelines.
batchsize = 128
# CIFAR-100: 50k train / 10k test images of shape 32x32x3 (uint8).
(x_train, y_train), (x_test, y_test) = datasets.cifar100.load_data()
# Labels are loaded with shape [N, 1]; squeeze to [N] so one_hot /
# equal comparisons later work element-wise.
y_train = tf.squeeze(y_train, axis=1)
y_test = tf.squeeze(y_test, axis=1)
traindb = tf.data.Dataset.from_tensor_slices((x_train, y_train))
# NOTE(review): shuffle buffer (6000) is smaller than the 50k training
# set, so shuffling is only approximate — confirm this is intentional.
traindb = traindb.shuffle(6000).map(preprocess).batch(batchsize)
# Test pipeline: no shuffling, same normalization and batching.
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(batchsize)
# VGG13-style feature extractor: five units, each two 3x3 'same' conv
# layers (ReLU) followed by a 2x2 max pool, so a 32x32 input is reduced
# to a 1x1x512 feature map.
conv_layers = []
for n_filters in (64, 128, 256, 512, 512):
    conv_layers.append(layers.Conv2D(n_filters, kernel_size=[3, 3],
                                     padding='same', activation=tf.nn.relu))
    conv_layers.append(layers.Conv2D(n_filters, kernel_size=[3, 3],
                                     padding='same', activation=tf.nn.relu))
    conv_layers.append(layers.MaxPool2D(pool_size=[2, 2], strides=2,
                                        padding='same'))
def main():
    """Train the VGG13-style network on CIFAR-100 and print test accuracy.

    Uses the module-level ``conv_layers``, ``traindb`` and ``test_db``.
    Trains for 50 epochs with Adam, evaluating on the test set after
    each epoch.
    """
    conv_net = Sequential(conv_layers)
    # Classifier head: 512-d feature vector -> 100 class logits.
    fc_net = Sequential([
        layers.Dense(256, activation=tf.nn.relu),
        layers.Dense(128, activation=tf.nn.relu),
        layers.Dense(100),  # raw logits; softmax happens in the loss
    ])
    conv_net.build(input_shape=[None, 32, 32, 3])
    fc_net.build(input_shape=[None, 512])
    # 'learning_rate' is the supported keyword; the old 'lr' alias is
    # deprecated and removed in recent TF2/Keras releases.
    optimizer = optimizers.Adam(learning_rate=3e-4)
    # Both networks are built, so trainable_variables is fully populated.
    variables = conv_net.trainable_variables + fc_net.trainable_variables

    for epoch in range(50):
        for step, (x, y) in enumerate(traindb):
            with tf.GradientTape() as tape:
                out = conv_net(x)                 # [b, 1, 1, 512]
                out = tf.reshape(out, [-1, 512])  # flatten to [b, 512]
                logits = fc_net(out)
                y_onehot = tf.one_hot(y, depth=100)
                loss = tf.losses.categorical_crossentropy(
                    y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)
            grads = tape.gradient(loss, variables)
            optimizer.apply_gradients(zip(grads, variables))
            if step % 100 == 0:
                print(epoch, 'loss:', float(loss))

        # Per-epoch evaluation on the held-out test set.
        total_num = 0
        total_correct = 0
        for (x, y) in test_db:
            out = conv_net(x)
            out = tf.reshape(out, [-1, 512])
            logits = fc_net(out)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.cast(tf.argmax(prob, axis=1), dtype=tf.int32)
            # Bug fix: reduce the per-example matches BEFORE accumulating.
            # The original did `total_correct += tf.cast(...)` (int +
            # [b]-tensor broadcast) and then reduce_sum, which counted the
            # running total once per example in the batch.
            correct = tf.reduce_sum(tf.cast(tf.equal(pred, y),
                                            dtype=tf.int32))
            total_correct += int(correct)
            total_num += x.shape[0]  # bug fix: was x.shpe[0] (AttributeError)
        acc = total_correct / total_num
        print('acc:', acc)


if __name__ == '__main__':
    main()
# Source article: "TensorFlow convolutional neural network — CIFAR-100 in
# practice" (scraped blog trailing text; latest recommended article
# published 2024-05-24).