5.1 卷积神经网络(MNIST 手写数字识别)
# coding=utf-8
"""
__project_ = 'Python深度学习'
__file_name__ = '5.1卷积神经网络'
__author__ = 'WIN10'
__time__ = '2020/4/12 15:27'
__product_name = PyCharm
"""
from keras import layers
from keras import models
from keras.datasets import mnist
from keras.utils import to_categorical
# ---- Load the MNIST data -------------------------------------------------
# mnist.load_data() returns numpy arrays: images (num, 28, 28) uint8,
# labels (num,) ints in 0-9.
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# ---- Preprocess ----------------------------------------------------------
# Add the trailing channel axis expected by Conv2D and scale pixels to
# [0, 1]. Using -1 instead of hard-coding 60000/10000 keeps the script
# correct regardless of the dataset split sizes.
train_images = train_images.reshape((-1, 28, 28, 1)).astype('float32') / 255
test_images = test_images.reshape((-1, 28, 28, 1)).astype('float32') / 255
# One-hot encode integer labels to match categorical_crossentropy.
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)

# ---- Build the network ---------------------------------------------------
# Small conv stack followed by a dense classifier with 10-way softmax.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))

# ---- Compile: optimizer, loss function, tracked metrics ------------------
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# ---- Train ---------------------------------------------------------------
model.fit(train_images, train_labels, epochs=5, batch_size=64)

# ---- Evaluate on the held-out test set -----------------------------------
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(test_loss, test_acc)
5.2 卷积神经网络三分类
# coding=utf-8
"""
__project_ = 'Python深度学习'
__file_name__ = '5.2卷积神经网络三分类'
__author__ = 'WIN10'
__time__ = '2020/4/12 16:45'
__product_name = PyCharm
"""
from keras.preprocessing.image import ImageDataGenerator
from keras import models
from keras import layers
from keras import optimizers
import matplotlib.pyplot as plt
def DataGen(dir_path, img_row, img_col, batch_size, is_train):
    """Create a directory-backed image batch generator.

    Training generators get augmentation (zoom/rotate/shear/shift/flip)
    plus shuffling; validation generators only rescale and keep order.

    :param dir_path: root directory with one sub-directory per class
    :param img_row: target image height
    :param img_col: target image width
    :param batch_size: images per yielded batch
    :param is_train: True -> augment + shuffle, False -> rescale only
    :return: a keras DirectoryIterator yielding (images, one-hot labels)
    """
    if is_train:
        opts = dict(
            rescale=1. / 255,
            zoom_range=0.2,
            rotation_range=40.,
            shear_range=0.2,
            width_shift_range=0.2,
            height_shift_range=0.2,
            horizontal_flip=True,
            fill_mode='nearest',
        )
    else:
        opts = dict(rescale=1. / 255)
    datagen = ImageDataGenerator(**opts)
    # class_mode defaults to 'categorical' (one-hot), which matches the
    # categorical_crossentropy loss used by the models in this file.
    return datagen.flow_from_directory(
        dir_path,
        target_size=(img_row, img_col),
        batch_size=batch_size,
        # class_mode='binary',
        shuffle=is_train)
# ---- Data preparation ----------------------------------------------------
image_size = 65      # square input edge length in pixels
image_class = 3      # number of target classes
batch_size = 128
epochs = 100
train_image_path = 'G:\\DL\\MyData\\MattingImages\\train'
test_image_path = 'G:\\DL\\MyData\\MattingImages\\val'
train_generator = DataGen(train_image_path, image_size, image_size, batch_size, True)
validation_generator = DataGen(test_image_path, image_size, image_size, batch_size, False)
# Peek at one batch to sanity-check the label encoding before training.
for data_batch, labels_batch in train_generator:
    # print(data_batch.shape, labels_batch.shape)
    print(labels_batch)
    break
# ---- Build the network ---------------------------------------------------
# Conv/MaxPool stack of increasing depth, then dropout + dense classifier.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(image_size, image_size, 3)))
model.add(layers.MaxPooling2D((2, 2)))
for depth in (64, 128, 128):
    model.add(layers.Conv2D(depth, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(image_class, activation='softmax'))

# ---- Compile: loss function, optimizer, tracked metric -------------------
model.compile(optimizer=optimizers.RMSprop(lr=1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# ---- Train ---------------------------------------------------------------
# fit_generator is this keras version's generator-based training loop;
# steps_per_epoch / validation_steps set how many batches make one epoch.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=2,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=1)
model.save('my_model_65.h5')

# ---- Plot training/validation accuracy and loss --------------------------
history_dict = history.history
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
# Use a dedicated name for the x-axis instead of re-using (and clobbering)
# the `epochs` hyper-parameter defined earlier in this script.
epoch_axis = range(1, len(acc) + 1)
plt.plot(epoch_axis, acc, 'bo', label='Training acc')
plt.plot(epoch_axis, val_acc, 'b', label='Validation acc')
plt.title('training and val acc')
plt.legend()
plt.figure()
plt.plot(epoch_axis, loss, 'bo', label='Training loss')
plt.plot(epoch_axis, val_loss, 'b', label='Validation loss')
plt.title('train and val loss')
plt.legend()
plt.show()
5.3迁移学习VGG16
# coding=utf-8
"""
__project_ = 'Python深度学习'
__file_name__ = '5.3迁移学习VGG16'
__author__ = 'WIN10'
__time__ = '2020/4/12 18:53'
__product_name = PyCharm
"""
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras import models
from keras import layers
from keras import optimizers
import matplotlib.pyplot as plt
def DataGen(dir_path, img_row, img_col, batch_size, is_train):
    """Build a flow_from_directory generator over `dir_path`.

    When `is_train` is True the images are augmented (zoom, rotation,
    shear, shifts, horizontal flip) and shuffled; otherwise they are only
    rescaled to [0, 1] and yielded in directory order.

    :return: a keras DirectoryIterator of (image batch, one-hot labels)
    """
    if is_train:
        datagen = ImageDataGenerator(
            rescale=1. / 255,
            zoom_range=0.2,
            rotation_range=40.,
            shear_range=0.2,
            width_shift_range=0.2,
            height_shift_range=0.2,
            horizontal_flip=True,
            fill_mode='nearest')
    else:
        datagen = ImageDataGenerator(rescale=1. / 255)
    # Default class_mode='categorical' matches categorical_crossentropy.
    generator = datagen.flow_from_directory(
        dir_path,
        target_size=(img_row, img_col),
        batch_size=batch_size,
        # class_mode='binary',
        shuffle=is_train)
    return generator
# ---- Data preparation ----------------------------------------------------
image_size = 49      # square input edge length in pixels
image_class = 3      # number of target classes
batch_size = 128
epochs = 50
train_image_path = 'C:\\Users\\WIN10\\Documents\\WeChat Files\\AV393146198\\FileStorage\\File\\2020-04\\train'
test_image_path = 'C:\\Users\\WIN10\\Documents\\WeChat Files\\AV393146198\\FileStorage\\File\\2020-04\\test'
train_generator = DataGen(train_image_path, image_size, image_size, batch_size, True)
validation_generator = DataGen(test_image_path, image_size, image_size, batch_size, False)

# ---- Model: pretrained VGG16 base + new dense classifier -----------------
conv_base = VGG16(weights='imagenet', include_top=False,
                  input_shape=(image_size, image_size, 3))
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(image_class, activation='softmax'))
# Freeze the pretrained base BEFORE compiling so its weights stay fixed.
conv_base.trainable = False

# ---- Compile -------------------------------------------------------------
model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# ---- Train ---------------------------------------------------------------
# Only the new dense head is trainable; the frozen VGG16 base acts as a
# fixed feature extractor.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=8,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=3)
model.save('VGG16.h5')

# ---- Plot training/validation accuracy and loss --------------------------
history_dict = history.history
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
# Use a dedicated name for the x-axis instead of re-using (and clobbering)
# the `epochs` hyper-parameter defined earlier in this script.
epoch_axis = range(1, len(acc) + 1)
plt.plot(epoch_axis, acc, 'bo', label='Training acc')
plt.plot(epoch_axis, val_acc, 'b', label='Validation acc')
plt.title('training and val acc')
plt.legend()
plt.figure()
plt.plot(epoch_axis, loss, 'bo', label='Training loss')
plt.plot(epoch_axis, val_loss, 'b', label='Validation loss')
plt.title('train and val loss')
plt.legend()
plt.show()
5.4微调VGG16
# coding=utf-8
"""
__project_ = 'Python深度学习'
__file_name__ = '5.4微调VGG16'
__author__ = 'WIN10'
__time__ = '2020/4/12 19:22'
__product_name = PyCharm
"""
# coding=utf-8
from keras.applications import nasnet
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras import models
from keras import layers
from keras import optimizers
import matplotlib.pyplot as plt
def DataGen(dir_path, img_row, img_col, batch_size, is_train):
    """Return a directory-based batch generator for `dir_path`.

    Training mode adds the project's standard augmentation on top of the
    [0, 1] rescale and shuffles batches; evaluation mode rescales only.

    :return: a keras DirectoryIterator of (image batch, one-hot labels)
    """
    # Start from the rescale-only options and layer augmentation on top
    # for the training case.
    extra = {}
    if is_train:
        extra.update(
            zoom_range=0.2,
            rotation_range=40.,
            shear_range=0.2,
            width_shift_range=0.2,
            height_shift_range=0.2,
            horizontal_flip=True,
            fill_mode='nearest')
    datagen = ImageDataGenerator(rescale=1. / 255, **extra)
    # class_mode defaults to 'categorical', matching the softmax head.
    generator = datagen.flow_from_directory(
        dir_path,
        target_size=(img_row, img_col),
        batch_size=batch_size,
        # class_mode='binary',
        shuffle=is_train)
    return generator
# ---- Data preparation ----------------------------------------------------
image_size = 65      # square input edge length in pixels
image_class = 3      # number of target classes
batch_size = 128
epochs = 50
train_image_path = 'G:\\DL\\MyData\\MattingImages\\train'
test_image_path = 'G:\\DL\\MyData\\MattingImages\\val'
train_generator = DataGen(train_image_path, image_size, image_size, batch_size, True)
validation_generator = DataGen(test_image_path, image_size, image_size, batch_size, False)

# ---- Model: VGG16 base + new dense classifier ----------------------------
conv_base = VGG16(weights='imagenet', include_top=False,
                  input_shape=(image_size, image_size, 3))
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(image_class, activation='softmax'))

# ---- Fine-tuning: freeze everything before block5_conv1 ------------------
# Walk the base's layers in order; once block5_conv1 is reached, that
# layer and all following ones become trainable.
conv_base.trainable = True
set_trainable = False
for layer in conv_base.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    layer.trainable = set_trainable
# ---- Compile: loss function, optimizer, tracked metric -------------------
# Compile AFTER setting the trainable flags so they take effect.
model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# ---- Train ---------------------------------------------------------------
history = model.fit_generator(
    train_generator,
    steps_per_epoch=2,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=1)
model.save('VGG16_1.h5')

# ---- Plot training/validation accuracy and loss --------------------------
history_dict = history.history
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
# Use a dedicated name for the x-axis instead of re-using (and clobbering)
# the `epochs` hyper-parameter defined earlier in this script.
epoch_axis = range(1, len(acc) + 1)
plt.plot(epoch_axis, acc, 'bo', label='Training acc')
plt.plot(epoch_axis, val_acc, 'b', label='Validation acc')
plt.title('training and val acc')
plt.legend()
plt.figure()
plt.plot(epoch_axis, loss, 'bo', label='Training loss')
plt.plot(epoch_axis, val_loss, 'b', label='Validation loss')
plt.title('train and val loss')
plt.legend()
plt.show()
5.5神经网络可视化
# coding=utf-8
"""
__project_ = 'Python深度学习'
__file_name__ = '5.5神经网络可视化'
__author__ = 'WIN10'
__time__ = '2020/4/12 19:36'
__product_name = PyCharm
"""
from keras.models import load_model
from keras.preprocessing import image
import numpy as np
import matplotlib.pyplot as plt
from keras import models
# ---- Load the trained model and one input image --------------------------
model = load_model('my_model.h5')
model.summary()
img_path = 'C:\\Users\\WIN10\\Documents\\WeChat Files\\AV393146198\\FileStorage\\File\\2020-04\\train\\back\\100_1481.905_1473.008_10.499.bmp'
img = image.load_img(img_path, target_size=(49, 49))
img_tensor = image.img_to_array(img)
img_tensor = np.expand_dims(img_tensor, axis=0)  # add batch axis -> (1, 49, 49, 3)
img_tensor /= 255  # same rescaling the generators applied at training time
# print(img_tensor.shape)
# plt.imshow(img_tensor[0])
# plt.show()

# ---- Build a model that exposes the first 8 layers' activations ----------
layer_outputs = [layer.output for layer in model.layers[:8]]
activation_model = models.Model(inputs=model.input, outputs=layer_outputs)
activations = activation_model.predict(img_tensor)
layer_names = []
for layer in model.layers[:8]:
    layer_names.append(layer.name)
print(layer_names)

# ---- Tile each layer's channels into one image grid per layer ------------
images_per_row = 16
for layer_name, layer_activation in zip(layer_names, activations):
    n_features = layer_activation.shape[-1]  # number of channels
    size = layer_activation.shape[1]         # feature maps are size x size
    print(size)
    n_cols = n_features // images_per_row    # full rows of 16 tiles each
    display_grid = np.zeros((size * n_cols, images_per_row * size), dtype='float32')
    for col in range(n_cols):
        for row in range(images_per_row):
            channel_image = layer_activation[0, :, :, col * images_per_row + row]
            # Normalize to mean 128 / std 64 for display. The epsilon
            # prevents a divide-by-zero (all-NaN tile) when a dead
            # channel has zero variance.
            channel_image -= channel_image.mean()
            channel_image /= (channel_image.std() + 1e-5)
            channel_image *= 64
            channel_image += 128
            channel_image = np.clip(channel_image, 0, 255).astype('uint8')
            display_grid[col * size:(col + 1) * size, row * size:(row + 1) * size] = channel_image
    scale = 1. / size
    plt.figure(figsize=(scale * display_grid.shape[1], scale * display_grid.shape[0]))
    plt.title(layer_name)
    plt.grid(False)
    plt.imshow(display_grid, aspect='auto', cmap='viridis')
# Render all figures; every other script in this file ends with show().
plt.show()