# 第1关: 数据集的加载 (Level 1: loading the dataset)
import os
import numpy as np
import cv2
def get_train_data(data_path):
    """Load every image under *data_path* as training data.

    Each file is read with OpenCV, resized to 32x32 and scaled to [0, 1].
    The label comes from the filename prefix before the first dot:
    'dog.*' files get class 1, everything else class 0 (one-hot encoded).

    Parameters
    ----------
    data_path : str
        Directory containing the image files.

    Returns
    -------
    tuple[np.ndarray, np.ndarray]
        (images, onehot): images has shape (N, 32, 32, 3),
        onehot has shape (N, 2), where N is the number of files.
    """
    filenames = os.listdir(data_path)
    images = []
    # Size the label matrix to the actual file count instead of the
    # previously hard-coded 500 rows, so any dataset size works.
    onehot = np.zeros((len(filenames), 2))
    for i, filename in enumerate(filenames):
        img = cv2.imread(os.path.join(data_path, filename))
        resized_img = cv2.resize(img, (32, 32))
        normalized_img = resized_img / 255.0  # scale pixels to [0, 1]
        images.append(normalized_img)
        # Filenames look like 'dog.123.jpg' / 'cat.45.jpg'; the prefix
        # before the first dot is the class name.
        label = int(filename.split('.')[0] == 'dog')
        onehot[i, label] = 1
    return np.array(images), onehot
if __name__ == '__main__':
pass
# 第2关: 构建属于自己的卷积神经网络模型 (Level 2: building your own CNN model)
import os
import numpy as np
import cv2
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, regularizers, GlobalAveragePooling2D
from keras.layers import Conv2D, MaxPooling2D
IMAGE_HEIGHT = 32
IMAGE_WIDTH = 32
def build_model():
    """Assemble the small CNN used for the dog-vs-cat classifier.

    Architecture (fixed by the exercise):
      1. Conv2D, 16 filters of 3x3, ReLU
      2. MaxPooling, 2x2
      3. Conv2D, 16 filters of 3x3, ReLU
      4. MaxPooling, 2x2
      5. Flatten
      6. Dense(20), ReLU
      7. Dense(2), softmax

    Returns
    -------
    keras.Sequential
        The (uncompiled) model.
    """
    layer_stack = [
        Conv2D(16, kernel_size=3, activation='relu',
               input_shape=[IMAGE_HEIGHT, IMAGE_WIDTH, 3]),
        MaxPooling2D(pool_size=2),
        Conv2D(16, kernel_size=3, activation='relu'),
        MaxPooling2D(pool_size=2),
        Flatten(),
        Dense(20, activation='relu'),
        Dense(2, activation='softmax'),
    ]
    model = keras.Sequential()
    for layer in layer_stack:
        model.add(layer)
    return model
if __name__ == '__main__':
pass
# 第3关: 训练并保存模型 (Level 3: training and saving the model)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import keras
def train_model(images, onehot, model):
    """Compile, train and persist the model as required by the exercise.

    - categorical cross-entropy loss
    - mini-batch gradient descent (SGD) with learning rate 0.0002
    - accuracy as the reported metric
    - 10 epochs, batch size 32, silent training (verbose=0)
    - model saved to ./step3/DogVSCat.h5

    Parameters
    ----------
    images : np.ndarray
        Training images, shape (N, 32, 32, 3).
    onehot : np.ndarray
        One-hot labels, shape (N, 2).
    model : keras model
        The compiled-in-place model to train.
    """
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.SGD(lr=0.0002),
                  metrics=['accuracy'])
    model.fit(images, onehot, epochs=10, batch_size=32, verbose=0)
    # model.save raises if the target directory is missing; create it first.
    os.makedirs('./step3', exist_ok=True)
    model.save('./step3/DogVSCat.h5')
# 第4关: 加载模型并预测 (Level 4: loading the model and predicting)
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras.models import load_model
def predict(images):
    """Classify *images* with the previously saved DogVSCat model.

    Loads ./DogVSCat.h5 from the current directory, runs prediction
    with a batch size of 10, and returns the index of the most
    probable class per sample (per the training labels, 1 = dog,
    0 = everything else).

    Parameters
    ----------
    images : np.ndarray
        Batch of preprocessed images to classify.

    Returns
    -------
    np.ndarray
        Predicted class index for each input sample.
    """
    model = load_model('./DogVSCat.h5')
    probabilities = model.predict(images, batch_size=10)
    return np.argmax(probabilities, axis=1)