链接:https://pan.baidu.com/s/1Nxty7ntGSUUA18oa_-ORSQ
提取码:in9m
以上是 CIFAR-10 数据集的百度云下载链接及提取码。
第一步:从本地加载 CIFAR-10 图片数据。
import os
import numpy as np
import pickle as p
def load_CIFAR_batch(filename):
    """Load one pickled CIFAR-10 batch file.

    Parameters
    ----------
    filename : str
        Path to a CIFAR-10 batch file (e.g. 'data_batch_1' or 'test_batch').

    Returns
    -------
    images : np.ndarray, shape (N, 32, 32, 3), uint8
        Images in channel-last (NHWC) order, as Keras expects.
    labels : np.ndarray, shape (N,)
        Integer class labels 0-9.
    """
    with open(filename, 'rb') as f:
        # NOTE(security): pickle can execute arbitrary code when loading;
        # only use this on the official CIFAR-10 archive files.
        data_dict = p.load(f, encoding='bytes')
    images = data_dict[b'data']
    labels = data_dict[b'labels']
    # Each row is 3072 bytes laid out channel-first (3 x 32 x 32).
    # Use -1 so batches of any size work, not only the standard 10000.
    images = images.reshape(-1, 3, 32, 32)
    # (N, C, H, W) -> (N, H, W, C)
    images = images.transpose(0, 2, 3, 1)
    labels = np.array(labels)
    return images, labels
def load_CIFAR_data(data_dir):
    """Load the full CIFAR-10 dataset from a local directory.

    Parameters
    ----------
    data_dir : str
        Directory containing 'data_batch_1'..'data_batch_5' and 'test_batch'.

    Returns
    -------
    Xtrain, Ytrain, Xtest, Ytest : np.ndarray
        Training images/labels and test images/labels.
    """
    images_train = []
    labels_train = []
    for batch_no in range(1, 6):
        f = os.path.join(data_dir, 'data_batch_%d' % batch_no)
        print('loading', f)
        images_batch, label_batch = load_CIFAR_batch(f)
        images_train.append(images_batch)
        labels_train.append(label_batch)
    Xtrain = np.concatenate(images_train)
    Ytrain = np.concatenate(labels_train)
    # Drop the last per-batch references before loading the test set.
    del images_batch, label_batch
    Xtest, Ytest = load_CIFAR_batch(os.path.join(data_dir, 'test_batch'))
    # Fixed typo in the original message ('loadding').
    print('finished loading CIFAR-10 data')
    return Xtrain, Ytrain, Xtest, Ytest
# Use a raw string so backslashes in the Windows path can never be
# interpreted as escape sequences (e.g. '\n', '\t').
data_dir = r'D:\cat_dog\cifar-10-python\cifar-10-batches-py'
train_images, train_labels, test_images, test_labels = load_CIFAR_data(data_dir)
# Scale pixel values from [0, 255] to [0, 1] (promotes to float64).
train_images, test_images = train_images / 255, test_images / 255
第二步:转换数据集的数据类型。
import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets, Sequential
from tensorflow import keras
# dtypes before conversion (bare expression: only displays in a notebook)
train_labels[0].dtype,train_images[0].dtype
# output: (dtype('int32'), dtype('float64'))
# cast images to float32, the dtype Keras layers work with by default
train_images, test_images = tf.cast(train_images,dtype=tf.float32),tf.cast(test_images,dtype=tf.float32)
第三步:定义 VGG13 模型。
def VGG13_inference(in_shape):
    """Build and compile a truncated VGG13-style CNN for CIFAR-10.

    Only the first two convolution units are kept; units 3-5 (256/512/512
    filters) were removed because experiments showed the full depth hurts
    accuracy badly on 32x32 CIFAR-10 images.

    Parameters
    ----------
    in_shape : tuple
        Shape of the training tensor (N, H, W, C); only H, W, C are used.

    Returns
    -------
    keras.Sequential
        Compiled model (Adam optimizer, sparse categorical cross-entropy).
    """
    model = keras.Sequential(name='VGG13')
    # unit 1: two 64-filter 3x3 convs + 2x2 max-pool
    model.add(layers.Conv2D(64, (3, 3),
                            input_shape=(in_shape[1], in_shape[2], in_shape[3]),
                            strides=(1, 1), padding='same', activation='relu',
                            kernel_initializer='uniform'))
    # input_shape is only meaningful on the first layer; dropped here.
    model.add(layers.Conv2D(64, (3, 3), padding='same', activation='relu',
                            kernel_initializer='uniform'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='same'))
    # unit 2: two 128-filter 3x3 convs + 2x2 max-pool
    model.add(layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same',
                            activation='relu', kernel_initializer='uniform'))
    model.add(layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same',
                            activation='relu', kernel_initializer='uniform'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='same'))
    # units 3-5 of full VGG13 intentionally omitted (too deep for CIFAR-10).
    # classifier head
    model.add(layers.Flatten())
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(10, activation='softmax'))
    # Use the string loss name rather than the function object so a saved
    # model can be reloaded cleanly for later prediction.
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    return model
# matplotlib was never imported in the original file, so plt.plot below
# raised NameError; import it here where it is first needed.
import matplotlib.pyplot as plt

x_shape = train_images.shape
VGG13_model = VGG13_inference(x_shape)
totall_epochs = 0  # kept for compatibility; presumably a running epoch counter
epochs = 10
# Hold out 30% of the training data for validation.
history = VGG13_model.fit(train_images, train_labels, batch_size=64,
                          epochs=epochs, validation_split=0.3)
# Plot training vs. validation accuracy per epoch.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['training', 'validation'], loc='upper left')  # fixed 'valivation' typo
plt.show()
# Final evaluation on the held-out test set: [loss, accuracy].
res = VGG13_model.evaluate(test_images, test_labels)
print(res)
unit 3、4、5 被注释掉了:实验发现在 CIFAR-10 这种 32×32 的小图片上用不了这么深的网络,层数过深反而会使准确率大幅下降。
测试集结果表明,这个精简后的网络效果还是不错的。