1. On Kaggle, the 10 Monkey Species dataset is structured as follows: a training directory and a validation directory, each containing ten subfolders n0–n9 (one per species), plus a monkey_labels.txt file that maps folder names to species names.
2. Hands-on: reading the data
train_dir = "./10-monkey-species/training/training"
valid_dir = "./10-monkey-species/validation/validation"
label_file = "./10-monkey-species/monkey_labels.txt"
print(os.path.exists(train_dir))
print(os.path.exists(valid_dir))
print(os.path.exists(label_file))
print(os.listdir(train_dir))
print(os.listdir(valid_dir))
True
True
True
['n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9']
['n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9']
# Read the labels: a mapping from subfolder names to the actual class names
labels = pd.read_csv(label_file, header=0)
print(labels)
  Label             Latin Name                Common Name  Train Images  Validation Images
0    n0      alouatta_palliata             mantled_howler           131                 26
1    n1     erythrocebus_patas               patas_monkey           139                 28
2    n2         cacajao_calvus                bald_uakari           137                 27
3    n3         macaca_fuscata           japanese_macaque           152                 30
4    n4        cebuella_pygmea             pygmy_marmoset           131                 26
5    n5        cebus_capucinus      white_headed_capuchin           141                 28
6    n6        mico_argentatus           silvery_marmoset           132                 26
7    n7       saimiri_sciureus     common_squirrel_monkey           142                 28
8    n8        aotus_nigriceps  black_headed_night_monkey           133                 27
9    n9  trachypithecus_johnii             nilgiri_langur           132                 26
# Read the images
height = 128 # resize images to this size as they are read
width = 128
channels = 3
batch_size = 64
num_classes = 10
# For image data, Keras provides a higher-level wrapper that reads data and performs augmentation on the fly: ImageDataGenerator.
train_datagen = keras.preprocessing.image.ImageDataGenerator(
rescale = 1./255, # multiply every pixel by this factor, mapping values into (0, 1)
rotation_range = 40, # rotate images by a random angle in (-40, 40) degrees
width_shift_range = 0.2, # horizontal shift; a value in (0, 1) is a fraction of the width, a value > 1 means pixels
height_shift_range = 0.2, # shift vertically by a random amount of up to 20% of the height
shear_range = 0.2, # shear intensity
zoom_range = 0.2, # zoom intensity
horizontal_flip = True, # randomly flip images horizontally
fill_mode = 'nearest', # rule for filling in newly created pixels
)
# note: read images from the directory tree
train_generator = train_datagen.flow_from_directory(train_dir,
target_size = (height, width),
batch_size = batch_size, # how many images per generated batch
seed = 7,
shuffle = True,
class_mode = "categorical") #控制label的格式:one-hot编码后的,若为sparse,则label为一个数.
valid_datagen = keras.preprocessing.image.ImageDataGenerator(rescale = 1./255)
valid_generator = valid_datagen.flow_from_directory(valid_dir,
target_size = (height, width),
batch_size = batch_size,
seed = 7,
shuffle = False,
class_mode = "categorical")
# how many images are in the training and validation sets
train_num = train_generator.samples
valid_num = valid_generator.samples
print(train_num, valid_num)
Found 1098 images belonging to 10 classes.
Found 272 images belonging to 10 classes.
1098 272
for i in range(1):
    x, y = train_generator.next() # fetch the next batch of data
    print(x.shape, y.shape)
    print(y)
x.shape: (batch_size, height, width, channels)
y.shape: (batch_size, num_classes) when class_mode = "categorical"; (batch_size,) when class_mode = "sparse"
(64, 128, 128, 3) (64, 10)
[[0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]
 [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
 [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]
 [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]
 ...
 [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
 [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]
(one one-hot row per image, 64 rows in total)
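As noted in the class_mode comment above, "sparse" returns integer labels instead of one-hot vectors. A minimal sketch of that variant (not part of the original notebook; the matching loss would be "sparse_categorical_crossentropy"):
sparse_generator = train_datagen.flow_from_directory(train_dir,
                                                     target_size = (height, width),
                                                     batch_size = batch_size,
                                                     seed = 7,
                                                     shuffle = True,
                                                     class_mode = "sparse") # y has shape (batch_size,), values 0..9
x, y = sparse_generator.next()
print(y[:5]) # e.g. [8. 4. 6. 9. 6.] — one integer class index per image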
3. Hands-on: building and training the model
Network structure (the model definition itself appears in appendix code 1 below):
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 128, 128, 32) 896 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 32) 9248 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 64, 64, 32) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 64) 18496 _________________________________________________________________ conv2d_3 (Conv2D) (None, 64, 64, 64) 36928 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 32, 32, 64) 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 32, 32, 128) 73856 _________________________________________________________________ conv2d_5 (Conv2D) (None, 32, 32, 128) 147584 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 16, 16, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 32768) 0 _________________________________________________________________ dense (Dense) (None, 128) 4194432 _________________________________________________________________ dense_1 (Dense) (None, 10) 1290 ================================================================= Total params: 4,482,730 Trainable params: 4,482,730 Non-trainable params: 0 _________________________________________________________________
epochs = 10
history = model.fit_generator(train_generator,
# a generator yields data indefinitely, so the number of steps per epoch cannot be inferred; it must be specified explicitly
steps_per_epoch = train_num // batch_size,
epochs = epochs,
validation_data = valid_generator,
validation_steps = valid_num // batch_size)
Plot the loss curve and the accuracy curve:
def plot_learning_curves(history, label, epochs, min_value, max_value):
    data = {}
    data[label] = history.history[label]
    data['val_' + label] = history.history['val_' + label]
    pd.DataFrame(data).plot(figsize=(8, 5))
    plt.grid(True)
    plt.axis([0, epochs, min_value, max_value]) # set the ranges of the x and y axes
    plt.show()
plot_learning_curves(history, 'accuracy', epochs, 0, 1)
plot_learning_curves(history, 'loss', epochs, 1.5, 2.5)
Test set:
In this dataset the class of every image is given by the name of the folder it sits in, and the validation set is organized the same way. A test set, however, usually comes without ready-made labels; it is just a pile of images, so you have to write a new generator to handle them. We used train_datagen.flow_from_directory, which is why images were read folder by folder; when those class subfolders do not exist, there are two approaches. The first is to build a new generator that does not read data by folder. The second is to feed tensors to the model directly: convert the images to tensors yourself, but then you must also reproduce the generator's preprocessing by hand, including value rescaling, cropping/resizing, and so on.
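A minimal sketch of the first option, assuming the unlabeled test images sit under a hypothetical directory ./10-monkey-species/test/ (flow_from_directory expects the images in at least one subdirectory below the given path, even when there are no labels):
test_datagen = keras.preprocessing.image.ImageDataGenerator(rescale = 1./255)
test_generator = test_datagen.flow_from_directory("./10-monkey-species/test", # hypothetical path
                                                  target_size = (height, width),
                                                  batch_size = batch_size,
                                                  shuffle = False, # keep file order so predictions align with test_generator.filenames
                                                  class_mode = None) # yield images only, no labels
pred = model.predict_generator(test_generator,
                               steps = int(np.ceil(test_generator.samples / batch_size)))
pred_class = np.argmax(pred, axis = 1) # map indices back to class names via train_generator.class_indices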
4. Fine-tuning a model on 10 monkeys
Transfer learning: fine-tuning
In this exercise we fine-tune the ResNet50 architecture so that it adapts to our data.
ResNet50 is a residual network with 50 layers; residual networks are one of the important architectures among convolutional neural networks.
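The core idea of a residual network is the shortcut connection: a block learns a residual F(x) and adds the input back, so its output is F(x) + x. A minimal illustrative sketch (a simplified identity block, not the exact ResNet50 block, which also uses 1x1 bottleneck convolutions and batch normalization; it assumes x already has `filters` channels so the addition is shape-compatible):
def residual_block(x, filters):
    # two 3x3 convolutions compute the residual F(x)
    f = keras.layers.Conv2D(filters, 3, padding='same', activation='relu')(x)
    f = keras.layers.Conv2D(filters, 3, padding='same')(f)
    # shortcut connection: add the input back, then apply the nonlinearity
    out = keras.layers.Add()([x, f])
    return keras.layers.Activation('relu')(out)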
Only small changes to the earlier code are needed: change the image size to 224 and replace the manual rescaling with keras.applications.resnet50.preprocess_input (the batch size also drops from 64 to 24, since the larger images use more memory).
height = 224
width = 224
channels = 3
batch_size = 24
num_classes = 10
train_datagen = keras.preprocessing.image.ImageDataGenerator(
preprocessing_function = keras.applications.resnet50.preprocess_input, # preprocessing: normalizes images the way ResNet50 expects
rotation_range = 40,
width_shift_range = 0.2,
height_shift_range = 0.2,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True,
fill_mode = 'nearest',
)
train_generator = train_datagen.flow_from_directory(train_dir,
target_size = (height, width),
batch_size = batch_size,
seed = 7,
shuffle = True,
class_mode = "categorical")
valid_datagen = keras.preprocessing.image.ImageDataGenerator(
preprocessing_function = keras.applications.resnet50.preprocess_input)
valid_generator = valid_datagen.flow_from_directory(valid_dir,
target_size = (height, width),
batch_size = batch_size,
seed = 7,
shuffle = False,
class_mode = "categorical")
train_num = train_generator.samples
valid_num = valid_generator.samples
print(train_num, valid_num)
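For reference, keras.applications.resnet50.preprocess_input does not rescale to (0, 1); it applies "caffe"-style preprocessing: RGB is converted to BGR and the ImageNet channel means are subtracted. Roughly equivalent to the following sketch (the mean values are the standard ImageNet BGR means Keras uses):
def resnet50_preprocess_sketch(x):
    # x: float array with RGB values in [0, 255], channels last
    x = x[..., ::-1] # RGB -> BGR
    return x - np.array([103.939, 116.779, 123.68]) # subtract per-channel ImageNet means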
The network structure changes more substantially: ResNet50 is added to the model as a single layer, as shown below:
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= resnet50 (Model) (None, 2048) 23587712 _________________________________________________________________ dense (Dense) (None, 10) 20490 ================================================================= Total params: 23,608,202 Trainable params: 20,490 Non-trainable params: 23,587,712
resnet50_fine_tune = keras.models.Sequential()
resnet50_fine_tune.add(keras.applications.ResNet50(include_top = False, # drop the final 1000-class layer; we only need 10 classes
pooling = 'avg',
# without the top, ResNet50 ends in a convolutional feature map: a 3-D tensor rather than a vector, so a Dense layer cannot be attached directly
weights = 'imagenet'))
# weights takes two values: None trains from scratch; 'imagenet' downloads a pretrained model and initializes the network with its parameters
resnet50_fine_tune.add(keras.layers.Dense(num_classes, activation = 'softmax')) # include_top = False removed the classification layer, so add a Dense layer
resnet50_fine_tune.layers[0].trainable = False
resnet50_fine_tune.compile(loss="categorical_crossentropy",
optimizer="sgd", metrics=['accuracy']) # for fine-tuning, SGD is a good choice
resnet50_fine_tune.summary()
note: "without the top, ResNet50 ends in a convolutional feature map: a 3-D tensor rather than a vector, so a fully connected layer cannot be attached directly". There are two ways around this (see the sketch after this list):
1. Apply pooling to collapse the two spatial dimensions.
2. Flatten first, with pooling = None.
Here average pooling is used (pooling = 'avg').
A pooling size of (2, 2) halves the feature map; when the pooling size equals the feature-map size, the spatial dimensions collapse entirely, reducing the output to a vector.
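A quick comparison of the two options, assuming ResNet50's pre-pooling output shape of (None, 7, 7, 2048) at a 224x224 input:
# Option 1: global average pooling -> (None, 2048); this is what pooling = 'avg' does
gap = keras.layers.GlobalAveragePooling2D()
# Option 2: pooling = None followed by a Flatten layer -> (None, 7 * 7 * 2048) = (None, 100352)
flat = keras.layers.Flatten()
# Option 1 makes the following Dense layer far smaller: 2048 * 10 weights instead of 100352 * 10.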
In the network above, only the final fully connected layer is trainable. If you want the last few layers of ResNet to be trainable as well, there is a way to do that too:
resnet50 = keras.applications.ResNet50(include_top = False,
pooling = 'avg',
weights = 'imagenet')
for layer in resnet50.layers[0:-5]: # freeze every layer except the last 5
    layer.trainable = False
resnet50_new = keras.models.Sequential([
resnet50,
keras.layers.Dense(num_classes, activation = 'softmax'),
])
resnet50_new.compile(loss="categorical_crossentropy",
optimizer="sgd", metrics=['accuracy'])
resnet50_new.summary()
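A quick sanity check (not in the original notes) to confirm which layers remain trainable:
print(sum(layer.trainable for layer in resnet50.layers), "of", len(resnet50.layers), "layers trainable") # expect 5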
Appendix: 10_monkeys code 1:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("./10-monkey-species/"))
# Any results you write to the current directory are saved as output.
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sklearn
import sys
import tensorflow as tf
import time
from tensorflow import keras
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
train_dir = "./10-monkey-species/training/training"
valid_dir = "./10-monkey-species/validation/validation"
label_file = "./10-monkey-species/monkey_labels.txt"
print(os.path.exists(train_dir))
print(os.path.exists(valid_dir))
print(os.path.exists(label_file))
print(os.listdir(train_dir))
print(os.listdir(valid_dir))
# Read the labels: a mapping from subfolder names to the actual class names
labels = pd.read_csv(label_file, header=0)
print(labels)
# Read the images
height = 128 # resize images to this size as they are read
width = 128
channels = 3
batch_size = 64
num_classes = 10
# For image data, Keras provides a higher-level wrapper that reads data and performs augmentation on the fly: ImageDataGenerator.
train_datagen = keras.preprocessing.image.ImageDataGenerator(
rescale = 1./255, # multiply every pixel by this factor, mapping values into (0, 1)
rotation_range = 40, # rotate images by a random angle in (-40, 40) degrees
width_shift_range = 0.2, # horizontal shift; a value in (0, 1) is a fraction of the width, a value > 1 means pixels
height_shift_range = 0.2, # shift vertically by a random amount of up to 20% of the height
shear_range = 0.2, # shear intensity
zoom_range = 0.2, # zoom intensity
horizontal_flip = True, # randomly flip images horizontally
fill_mode = 'nearest', # rule for filling in newly created pixels
)
# read images from the directory tree
train_generator = train_datagen.flow_from_directory(train_dir,
target_size = (height, width),
batch_size = batch_size, # how many images per generated batch
seed = 7,
shuffle = True,
class_mode = "categorical") #控制label的格式:one-hot编码后的
valid_datagen = keras.preprocessing.image.ImageDataGenerator(rescale = 1./255)
valid_generator = valid_datagen.flow_from_directory(valid_dir,
target_size = (height, width),
batch_size = batch_size,
seed = 7,
shuffle = False,
class_mode = "categorical")
# how many images are in the training and validation sets
train_num = train_generator.samples
valid_num = valid_generator.samples
print(train_num, valid_num)
for i in range(2):
    x, y = train_generator.next() # fetch the next batch of data
    print(x.shape, y.shape)
    print(y)
model = keras.models.Sequential([
keras.layers.Conv2D(filters=32, kernel_size=3, padding='same',
activation='relu', input_shape=[width, height, channels]),
keras.layers.Conv2D(filters=32, kernel_size=3, padding='same',
activation='relu'),
keras.layers.MaxPool2D(pool_size=2),
keras.layers.Conv2D(filters=64, kernel_size=3, padding='same',
activation='relu'),
keras.layers.Conv2D(filters=64, kernel_size=3, padding='same',
activation='relu'),
keras.layers.MaxPool2D(pool_size=2),
keras.layers.Conv2D(filters=128, kernel_size=3, padding='same',
activation='relu'),
keras.layers.Conv2D(filters=128, kernel_size=3, padding='same',
activation='relu'),
keras.layers.MaxPool2D(pool_size=2),
keras.layers.Flatten(),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(num_classes, activation='softmax'),
])
model.compile(loss="categorical_crossentropy",
optimizer="adam", metrics=['accuracy'])
# compared with SGD, Adam is a more advanced optimizer: it converges quickly and stably
model.summary()
epochs = 10
history = model.fit_generator(train_generator,
# a generator yields data indefinitely, so the number of steps per epoch cannot be inferred; it must be specified explicitly
steps_per_epoch = train_num // batch_size,
epochs = epochs,
validation_data = valid_generator,
validation_steps = valid_num // batch_size)
def plot_learning_curves(history, label, epochs, min_value, max_value):
    data = {}
    data[label] = history.history[label]
    data['val_' + label] = history.history['val_' + label]
    pd.DataFrame(data).plot(figsize=(8, 5))
    plt.grid(True)
    plt.axis([0, epochs, min_value, max_value]) # set the ranges of the x and y axes
    plt.show()
plot_learning_curves(history, 'accuracy', epochs, 0, 1)
plot_learning_curves(history, 'loss', epochs, 1.5, 2.5)
Appendix: 10_monkeys code 2 (with the fine-tuning changes):
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("./10-monkey-species/"))
# Any results you write to the current directory are saved as output.
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sklearn
import sys
import tensorflow as tf
import time
from tensorflow import keras
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
train_dir = "./10-monkey-species/training/training"
valid_dir = "./10-monkey-species/validation/validation"
label_file = "./10-monkey-species/monkey_labels.txt"
print(os.path.exists(train_dir))
print(os.path.exists(valid_dir))
print(os.path.exists(label_file))
print(os.listdir(train_dir))
print(os.listdir(valid_dir))
labels = pd.read_csv(label_file, header=0)
print(labels)
height = 224
width = 224
channels = 3
batch_size = 24
num_classes = 10
train_datagen = keras.preprocessing.image.ImageDataGenerator(
preprocessing_function = keras.applications.resnet50.preprocess_input, # preprocessing: normalizes images the way ResNet50 expects
rotation_range = 40,
width_shift_range = 0.2,
height_shift_range = 0.2,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True,
fill_mode = 'nearest',
)
train_generator = train_datagen.flow_from_directory(train_dir,
target_size = (height, width),
batch_size = batch_size,
seed = 7,
shuffle = True,
class_mode = "categorical")
valid_datagen = keras.preprocessing.image.ImageDataGenerator(
preprocessing_function = keras.applications.resnet50.preprocess_input)
valid_generator = valid_datagen.flow_from_directory(valid_dir,
target_size = (height, width),
batch_size = batch_size,
seed = 7,
shuffle = False,
class_mode = "categorical")
train_num = train_generator.samples
valid_num = valid_generator.samples
print(train_num, valid_num)
for i in range(2):
    x, y = train_generator.next()
    print(x.shape, y.shape)
    print(y)
resnet50_fine_tune = keras.models.Sequential()
resnet50_fine_tune.add(keras.applications.ResNet50(include_top = False, # drop the final 1000-class layer; we only need 10 classes
pooling = 'avg',
# without the top, ResNet50 ends in a convolutional feature map: a 3-D tensor rather than a vector, so a Dense layer cannot be attached directly
weights = 'imagenet'))
# weights takes two values: None trains from scratch; 'imagenet' downloads a pretrained model and initializes the network with its parameters
resnet50_fine_tune.add(keras.layers.Dense(num_classes, activation = 'softmax')) # include_top = False removed the classification layer, so add a Dense layer
resnet50_fine_tune.layers[0].trainable = False
resnet50_fine_tune.compile(loss="categorical_crossentropy",
optimizer="sgd", metrics=['accuracy']) # for fine-tuning, SGD is a good choice
resnet50_fine_tune.summary()
# fine-tuning usually reaches good results within about 10 epochs
epochs = 10
history = resnet50_fine_tune.fit_generator(train_generator,
steps_per_epoch = train_num // batch_size,
epochs = epochs,
validation_data = valid_generator,
validation_steps = valid_num // batch_size)
def plot_learning_curves(history, label, epochs, min_value, max_value):
    data = {}
    data[label] = history.history[label]
    data['val_' + label] = history.history['val_' + label]
    pd.DataFrame(data).plot(figsize=(8, 5))
    plt.grid(True)
    plt.axis([0, epochs, min_value, max_value])
    plt.show()
plot_learning_curves(history, 'accuracy', epochs, 0, 1)
plot_learning_curves(history, 'loss', epochs, 0, 2)
# make the last few layers of resnet trainable as well
resnet50 = keras.applications.ResNet50(include_top = False,
pooling = 'avg',
weights = 'imagenet')
resnet50.summary()
for layer in resnet50.layers[0:-5]: # freeze every layer except the last 5
    layer.trainable = False
resnet50_new = keras.models.Sequential([
resnet50,
keras.layers.Dense(num_classes, activation = 'softmax'),
])
resnet50_new.compile(loss="categorical_crossentropy",
optimizer="sgd", metrics=['accuracy'])
resnet50_new.summary()
epochs = 10
history = resnet50_new.fit_generator(train_generator,
steps_per_epoch = train_num // batch_size,
epochs = epochs,
validation_data = valid_generator,
validation_steps = valid_num // batch_size)
plot_learning_curves(history, 'accuracy', epochs, 0, 1)
plot_learning_curves(history, 'loss', epochs, 0, 2)