from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
import numpy as np  # needed for expand_dims below (was missing in the snippet)

# Load one image resized to 48x48; load_img returns a PIL image.
# NOTE(review): `path` and `file` are assumed to be defined by surrounding code.
img = load_img(path + file, target_size=(48, 48))
# Convert the PIL image to a float array, shape (48, 48, channels).
x = img_to_array(img)
# Add a leading batch dimension -> (1, 48, 48, channels).
# (The original assigned to/read from the undefined name `image` here.)
x = np.expand_dims(x, axis=0)
x = x.astype('float32')
x /= 255  # scale pixel values to [0, 1]
跑实验的时候,要准备自己的数据集。Keras 的数据接口方式还是比较友善的,对图像的数据预处理有两种方式:一种是 .flow(),另一种是 .flow_from_directory(directory)。
关键导入包:
# Use tf.keras consistently: the original mixed the standalone `keras`
# package with `tensorflow.keras`, which yields incompatible class objects
# (e.g. a tf.keras ImageDataGenerator fed to a standalone-keras model).
import tensorflow.keras as keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow.keras.utils as np_utils  # provides to_categorical
.flow()的方式
此时数据集是已经处理成了数组的形式,如我们从网上下载的公开数据集,形状为 (nums, rows, cols, channels)。
# Load CIFAR-10 as numpy arrays: X_* has shape (num_samples, 32, 32, 3).
# NOTE(review): assumes `cifar10`, `nb_classes`, `nb_epoch` and `model`
# are defined elsewhere — confirm against the full script.
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# One-hot encode the integer class labels.
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)

# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(X_train)

# Train on batches with real-time data augmentation.
# fit_generator / samples_per_epoch / nb_epoch are the removed Keras 1 API;
# in Keras 2 / tf.keras, model.fit accepts generators directly, and
# samples_per_epoch=len(X_train) becomes steps_per_epoch=len(X_train)//batch.
model.fit(datagen.flow(X_train, Y_train, batch_size=32),
          steps_per_epoch=len(X_train) // 32,
          epochs=nb_epoch)
# here's a more "manual" example (train batch by batch by hand)
for e in range(nb_epoch):
    # Python 3 print function (the original used Python 2 `print 'Epoch', e`).
    print('Epoch', e)
    batches = 0
    for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size=32):
        # train_on_batch is the per-batch training API;
        # `model.train` does not exist on Keras models.
        loss = model.train_on_batch(X_batch, Y_batch)
        batches += 1
        if batches >= len(X_train) / 32:
            # we need to break the loop by hand because
            # the generator loops indefinitely
            break
.flow_from_directory(directory)的方式
这种方式是从文件夹里读入图片,此时传入的文件夹(directory)的路径是数据集总路径,里面包含若干个直接以分类类别命名的子文件夹,各个类别的图片放好在各自所属类别的文件夹里面。代码读入数据成矩阵的同时,根据类别文件夹名字把数据集的class字典做好。目录结构如下图:
└─train
├─class1_name
│ xxr.jpg
│ xxx.jpg
│
├─class2_name
│ xxr.jpg
│ xxx.jpg
│
└─class3_name
xxr.jpg
xxx.jpg
# Augmentation for training images; validation images only get rescaled.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)

# Each subdirectory of 'data/train' is one class; class indices are built
# from the subdirectory names automatically.
train_generator = train_datagen.flow_from_directory(
    'data/train',
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    'data/validation',
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')

# Keras 2 / tf.keras: model.fit accepts generators; the Keras 1
# fit_generator / samples_per_epoch / nb_val_samples arguments were removed.
# 2000 training samples @ batch 32 and 800 validation samples become
# step counts of samples // batch_size.
model.fit(
    train_generator,
    steps_per_epoch=2000 // 32,
    epochs=50,
    validation_data=validation_generator,
    validation_steps=800 // 32)
当然,更多信息可以直接看中文文档:https://keras-cn.readthedocs.io/en/latest/
原文链接:https://blog.csdn.net/u010420283/article/details/103228567