CIFAR-10 dataset, Baidu Cloud link:
Link: https://pan.baidu.com/s/1Nxty7ntGSUUA18oa_-ORSQ
Extraction code: in9m
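If downloading from Baidu Cloud is inconvenient, Keras can fetch the same dataset directly (a minimal sketch; note that its labels come back with shape (N, 1) rather than the flat (N,) arrays produced by the loader below):

import tensorflow as tf

# Downloads CIFAR-10 to ~/.keras/datasets on first use and reuses the cached copy afterwards.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
print(x_train.shape, y_train.shape)  # (50000, 32, 32, 3) (50000, 1)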
Load the images from local files
import os
import numpy as np
import pickle as p

def load_CIFAR_batch(filename):
    """Load a single CIFAR-10 batch file and return (images, labels)."""
    with open(filename, 'rb') as f:
        data_dict = p.load(f, encoding='bytes')
        images = data_dict[b'data']
        labels = data_dict[b'labels']
        # Each batch holds 10000 images stored as flat rows of 3x32x32 bytes;
        # reshape and move the channel axis last to get (10000, 32, 32, 3).
        images = images.reshape(10000, 3, 32, 32)
        images = images.transpose(0, 2, 3, 1)
        labels = np.array(labels)
        return images, labels
def load_CIFAR_data(data_dir):
    images_train = []
    labels_train = []
    # The training set is split across data_batch_1 .. data_batch_5.
    for i in range(5):
        f = os.path.join(data_dir, 'data_batch_%d' % (i + 1))
        print('loading', f)
        images_batch, label_batch = load_CIFAR_batch(f)
        images_train.append(images_batch)
        labels_train.append(label_batch)
    Xtrain = np.concatenate(images_train)
    Ytrain = np.concatenate(labels_train)
    del images_batch, label_batch
    Xtest, Ytest = load_CIFAR_batch(os.path.join(data_dir, 'test_batch'))
    print('finished loading CIFAR-10 data')
    return Xtrain, Ytrain, Xtest, Ytest

data_dir = r'D:\cat_dog\cifar-10-python\cifar-10-batches-py'  # raw string so the backslashes are not treated as escapes
train_images, train_labels, test_images, test_labels = load_CIFAR_data(data_dir)
train_images, test_images = train_images / 255.0, test_images / 255.0  # scale pixel values to [0, 1]
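Optionally, a quick sanity check confirms the arrays look as expected before training:

# Expected: (50000, 32, 32, 3) (50000,) (10000, 32, 32, 3) (10000,)
print(train_images.shape, train_labels.shape, test_images.shape, test_labels.shape)
# After dividing by 255.0 the pixels lie in [0, 1]; the labels are class indices 0..9.
print(train_images.min(), train_images.max(), train_labels.min(), train_labels.max())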
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
Display some of the images
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    # train_labels is a 1-D array here, so train_labels[i] is already a scalar class index.
    plt.xlabel(class_names[train_labels[i]])
plt.show()
Build an AlexNet model
The parameters are slightly modified so the network suits training on CIFAR-10's 32x32 inputs.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt

def AlexNet_inference(in_shape):
    model = keras.Sequential(name='AlexNet')
    # The original AlexNet first layer (11x11 kernels, stride 4) is too aggressive for 32x32 inputs:
    # model.add(layers.Conv2D(96, (11, 11), strides=(4, 4), input_shape=(in_shape[1], in_shape[2], in_shape[3]),
    #                         padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(layers.Conv2D(96, (3, 3), strides=(2, 2), input_shape=(in_shape[1], in_shape[2], in_shape[3]),
                            padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(layers.Conv2D(384, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(layers.Conv2D(384, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(2048, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(2048, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(10, activation='softmax'))
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss='sparse_categorical_crossentropy',  # pass the loss as a string, not the function object, otherwise loading the saved model later for testing fails!
                  metrics=['accuracy'])
    model.summary()
    return model
x_shape = train_images.shape
AlexNet_model = AlexNet_inference(x_shape)
total_epochs = 0
epochs = 10
history = AlexNet_model.fit(train_images, train_labels, batch_size=64, epochs=epochs, validation_split=0.1)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['training', 'validation'], loc='upper left')
plt.show()
res = AlexNet_model.evaluate(test_images, test_labels)
print(res)
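Since the loss was passed as a string precisely so the model can be reloaded later, a minimal save-and-reload sketch looks like this (the file name AlexNet_cifar10.h5 is just an example):

# Save the architecture, weights and optimizer state to a single HDF5 file.
AlexNet_model.save('AlexNet_cifar10.h5')
# Reload it in a fresh session; no custom_objects are needed because the loss is a built-in string name.
restored_model = keras.models.load_model('AlexNet_cifar10.h5')
print(restored_model.evaluate(test_images, test_labels))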
Continue training for another 10 epochs
history = AlexNet_model.fit(train_images, train_labels, batch_size=64, epochs=epochs, validation_split=0.3)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['training', 'validation'], loc='upper left')
plt.show()
res = AlexNet_model.evaluate(test_images, test_labels)
print(res)
The results are already quite good.
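If you want to keep training beyond this point, the total_epochs counter defined earlier can keep the epoch numbering continuous across repeated fit calls. A sketch using fit's initial_epoch argument (the value 20 assumes the two 10-epoch runs above have completed):

total_epochs = 20  # epochs already completed by the two runs above
history = AlexNet_model.fit(train_images, train_labels,
                            batch_size=64,
                            initial_epoch=total_epochs,    # resume the epoch axis at 20
                            epochs=total_epochs + epochs,  # train 10 more epochs, ending at 30
                            validation_split=0.1)
total_epochs += epochs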