Python深度学习(二)

对 kaggle 数据中的 cats and dogs 中的图像进行二分类.


'''
对 kaggle 中的猫狗数据分类
train set = 2000 #1000张猫的图像,1000张狗的图像
validation = 1000 #500张猫的图像,500张狗的图像
test = 1000 #500张猫的图像,500张狗的图像
'''


import os, shutil

#original_dataset_dir = r'/home/paul/视频/kaggle/train'
#original_dataset_dir1 = r'/home/paul/视频/kaggle/train'
#original_dataset_dir2 = r'/home/paul/视频/kaggle/test1'
# Root directory holding the pruned cats-vs-dogs dataset.
base_dir = r'/media/amax/2A30198930195CDF/paul/练习/data/kaggle/cats_and_dogs_small'
# os.mkdir(base_dir)

# One sub-directory per split (train / validation / test) under the root.
train_dir, validation_dir, test_dir = (
    os.path.join(base_dir, split) for split in ('train', 'validation', 'test')
)
# os.mkdir(train_dir)
# os.mkdir(validation_dir)
# os.mkdir(test_dir)

# Within each split, one sub-directory per class ('cats' and 'dogs') —
# this layout is what flow_from_directory expects for binary labels.
train_cats_dir, train_dogs_dir = (
    os.path.join(train_dir, cls) for cls in ('cats', 'dogs')
)
validation_cats_dir, validation_dogs_dir = (
    os.path.join(validation_dir, cls) for cls in ('cats', 'dogs')
)
test_cats_dir, test_dogs_dir = (
    os.path.join(test_dir, cls) for cls in ('cats', 'dogs')
)
# os.mkdir(train_cats_dir)
# os.mkdir(train_dogs_dir)
# os.mkdir(validation_cats_dir)
# os.mkdir(validation_dogs_dir)
# os.mkdir(test_cats_dir)
# os.mkdir(test_dogs_dir)

# NOTE: The two triple-quoted blocks below are deliberately disabled code.
# The first copies 1000/500/500 cat images and 1000/500/500 dog images from
# the original Kaggle dump into the train/validation/test folders (run once,
# with original_dataset_dir uncommented above). The second prints per-split
# image counts as a sanity check of the copy.
'''
#产生猫的训练数据数据
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(train_cats_dir, fname)
    shutil.copyfile(src, dst)

#产生猫的验证数据
fnames = ['cat.{}.jpg'.format(i) for i in range(1000,1500)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(validation_cats_dir,fname)
    shutil.copyfile(src, dst)
#产生猫的测试数据
fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(test_cats_dir, fname)
    shutil.copyfile(src, dst)

#产生狗的训练数据
fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(train_dogs_dir, fname)
    shutil.copyfile(src, dst)
#产生狗的验证数据
fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(validation_dogs_dir, fname)
    shutil.copyfile(src, dst)
#产生狗的测试数据
fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(test_dogs_dir, fname)
    shutil.copyfile(src, dst)
'''
'''
print('total train cat images:',len(os.listdir(train_cats_dir)))
print('total train dog images:', len(os.listdir(train_dogs_dir)))
print('total test cat image:',len(os.listdir(test_cats_dir)))
print('total test dog images:', len(os.listdir(test_dogs_dir)))
print('total validtation cat images:',len(os.listdir(validation_cats_dir)))
print('total validation dog images:', len(os.listdir(validation_dogs_dir)))
print("*"*20)
print('')
'''
from keras import models
from keras import layers
from keras import optimizers

# Small convnet for binary cats-vs-dogs classification on 150x150 RGB inputs.
model = models.Sequential()
# Four conv/pool stages: channel depth grows 32 -> 64 -> 128 -> 128 while each
# 2x2 max-pool halves the spatial size before the dense classifier.
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(150, 150, 3)))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
# Normalized from MaxPool2D(2, 2) (pool_size=2, strides=2) — equivalent
# pooling, now written consistently with the earlier layers.
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))

model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
# Single sigmoid unit: the output is the probability of the positive class.
model.add(layers.Dense(1, activation='sigmoid'))

# model.summary()
# Binary cross-entropy + rmsprop, tracking accuracy under the key 'acc'
# (the plotting code below reads history.history['acc'] / ['val_acc']).
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['acc'])

# Stream batches of decoded, rescaled images straight from the directories.
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255)  # scale pixels to [0, 1]
validation_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(150, 150),  # resize every image to 150x150
    batch_size=20,
    class_mode='binary'      # 1-D binary labels to match binary_crossentropy
)
validation_generator = validation_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary'
)
'''
for data_batch, labels_batch in train_generator:
    print('data batch shape:', data_batch.shape)
    print('labels batch shape:', labels_batch.shape)
    break
'''
# 100 steps x 20 images = 2000 training images per epoch;
# 50 steps x 20 images = 1000 validation images per evaluation pass.
# NOTE(review): fit_generator is the legacy API matching this keras import
# style; on modern tf.keras it is replaced by model.fit.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=validation_generator,
    validation_steps=50,
)
model.save('cats_and_dogs_small_1.h5')

# Plot the training curves recorded by fit_generator: one figure for loss,
# a second one for accuracy (training as dots, validation as a solid line).
import matplotlib.pyplot as plt

hist = history.history
acc = hist['acc']
val_acc = hist['val_acc']
loss = hist['loss']
val_loss = hist['val_loss']

epochs = range(1, len(acc) + 1)

# Loss figure.
for series, fmt, lab in ((loss, 'bo', 'Train loss'),
                         (val_loss, 'b', 'Validation loss')):
    plt.plot(epochs, series, fmt, label=lab)
plt.title('Train and Validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()

plt.figure()  # open a second figure for the accuracy curves

# Accuracy figure.
for series, fmt, lab in ((acc, 'ro', 'Train acc'),
                         (val_acc, 'r', 'Validation acc')):
    plt.plot(epochs, series, fmt, label=lab)
plt.title('Train and validation acc')
plt.xlabel('epoch')
plt.ylabel('acc')
plt.legend()

plt.show()

result :

损失和精度图 :

 

 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值