【4】EEG Sleep Staging Based on Deep Neural Networks (Model Training)

After the raw data have been shuffled and split into a training set and a validation set, the model is trained. The network used here is a twelve-layer convolutional structure; the batch size, number of epochs, optimizer parameters, etc. can be tuned during the experiments. The output is line plots of the classification accuracy and the classification loss.

# In[1]:
import os
import shutil

train_dir = r'C:\Users\10133\Desktop\bishe\matlab\traintest\train'
test_dir = r'C:\Users\10133\Desktop\bishe\matlab\traintest\test'  # paths to the training set and the validation set
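
# The shuffling and splitting described above is assumed to have been done beforehand.
# As an optional illustration (not part of the original pipeline), a minimal sketch of how
# such a split could be done in Python, assuming a hypothetical source_dir that contains
# one subdirectory of spectrogram images per sleep stage:
import random


def split_dataset(source_dir, train_dir, test_dir, train_ratio=0.8, seed=42):
    """Shuffle the images of each class and copy them into train/ and test/ class folders."""
    random.seed(seed)
    for class_name in os.listdir(source_dir):
        class_path = os.path.join(source_dir, class_name)
        if not os.path.isdir(class_path):
            continue
        files = os.listdir(class_path)
        random.shuffle(files)
        split = int(len(files) * train_ratio)
        for subset_dir, subset_files in ((train_dir, files[:split]), (test_dir, files[split:])):
            target = os.path.join(subset_dir, class_name)
            os.makedirs(target, exist_ok=True)
            for f in subset_files:
                shutil.copy(os.path.join(class_path, f), os.path.join(target, f))

# Example call (the source path is hypothetical):
# split_dataset(r'C:\Users\10133\Desktop\bishe\matlab\all_images', train_dir, test_dir)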

# In[2]: build the network structure
from keras import layers
from keras import models

model = models.Sequential()  # Keras model built with the Sequential API
model.add(layers.Conv2D(32, (5, 5), padding='same', activation='relu',
                        input_shape=(32, 32, 3)))  # convolutional layer; input shape is (height, width, channels) and must match the generators' target_size below
model.add(layers.MaxPooling2D(2, 2))  # max-pooling layer, halves the spatial dimensions

model.add(layers.Conv2D(64, (5, 5), padding='same', activation='relu'))  # convolutional layer
model.add(layers.MaxPooling2D(2, 2))  # max-pooling layer

model.add(layers.Conv2D(128, (5, 5), padding='same', activation='relu'))  # convolutional layer
model.add(layers.MaxPooling2D(2, 2))  # max-pooling layer

model.add(layers.Conv2D(128, (5, 5), padding='same', activation='relu'))  # convolutional layer
model.add(layers.MaxPooling2D(2, 2))  # max-pooling layer

model.add(layers.Flatten())  # flatten the multi-dimensional feature maps into a vector
model.add(layers.Dense(512, activation='relu'))  # fully connected layer, 512 neurons, ReLU activation
model.add(layers.Dropout(0.4))  # dropout regularization
model.add(layers.Dense(5, activation='softmax'))  # output layer: probabilities of the five sleep stages, summing to 1
model.summary()
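
# As a quick aside (not in the original script): the final softmax layer turns 5 raw scores
# into probabilities that sum to 1. A minimal NumPy sketch with made-up scores:
import numpy as np


def softmax(logits):
    """Numerically stable softmax: exponentiate shifted scores, then normalize."""
    shifted = logits - np.max(logits)
    exp = np.exp(shifted)
    return exp / exp.sum()


scores = np.array([2.0, 1.0, 0.1, -1.0, 0.5])  # hypothetical raw outputs for the 5 sleep stages
probs = softmax(scores)
print(probs, probs.sum())  # the probabilities sum to 1.0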

# In[3]
from keras import optimizers
import keras

model.compile(loss='categorical_crossentropy',  # loss function
              optimizer=optimizers.Adam(lr=1e-4),  # optimizer and learning rate
              metrics=['acc'])  # metric to monitor
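
# For reference (illustrative values only): categorical_crossentropy compares the one-hot
# true label with the predicted probability vector, reducing to -log(p) of the true class.
import numpy as np

y_true = np.array([0, 0, 1, 0, 0])            # one-hot label: the sample belongs to class 3 of 5
y_pred = np.array([0.1, 0.1, 0.6, 0.1, 0.1])  # hypothetical softmax output
loss = -np.sum(y_true * np.log(y_pred))
print(loss)  # -log(0.6) ≈ 0.51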

# In[4]: data loading and preprocessing

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(  # training data
    directory=train_dir,
    target_size=(32, 32),  # must match the model's input_shape defined above
    batch_size=4,  # split the data into mini-batches; keep this value consistent throughout training
)

test_generator = test_datagen.flow_from_directory(  # validation data
    directory=test_dir,
    target_size=(32, 32),  # must match the model's input_shape defined above
    batch_size=4,
)
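
# flow_from_directory infers each image's class from the subdirectory it sits in, so train/
# and test/ are each expected to contain one folder per sleep stage. An optional sanity check
# on the generators built above:
print(train_generator.class_indices)   # mapping from subdirectory name to integer label
print(test_generator.class_indices)
print(train_generator.samples, 'training images,', test_generator.samples, 'validation images')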

# In[5]: train the model; the call to model.fit() returns a History object containing everything that happened during training

num_of_train_samples = 158
num_of_test_samples = 39
batch_size = 4
epochs = 10  # number of epochs; keep this consistent across training runs

history = model.fit(
    train_generator,
    steps_per_epoch=num_of_train_samples // batch_size,
    epochs=epochs,
    validation_data=test_generator,
    validation_steps=num_of_test_samples // batch_size)
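
# Optional follow-up (not in the original script): persist the trained weights and get a single
# evaluation score on the validation generator (assumes a Keras version whose evaluate(), like
# the fit() call above, accepts a generator directly). The save path is just an example.
model.save(r'C:\Users\10133\Desktop\bishe\matlab\sleep_stage_cnn.h5')

val_loss_final, val_acc_final = model.evaluate(
    test_generator, steps=num_of_test_samples // batch_size)
print('validation loss:', val_loss_final, 'validation accuracy:', val_acc_final)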

# In[6]: history contains four keys, used to plot the curves
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

# Use a Chinese font (SimHei) for the plot labels and render the minus sign correctly.
# (The private matplotlib.font_manager._rebuild() call used previously is unnecessary
# and has been removed from recent matplotlib versions.)
mpl.rcParams['font.sans-serif'] = [u'SimHei']
mpl.rcParams['axes.unicode_minus'] = False

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))


def smooth_curve(points, factor=0.8):  # exponentially smooth the curves, reproducing TensorBoard's curve-smoothing in code
    smoothed_points = []
    for point in points:
        if smoothed_points:
            previous = smoothed_points[-1]
            smoothed_points.append(previous * factor + point * (1 - factor))
        else:
            smoothed_points.append(point)
    return smoothed_points
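
# smooth_curve is defined above but not applied; if the raw curves are too noisy, smoothed
# copies can be plotted instead of the raw values used below:
acc_smooth = smooth_curve(acc)
val_acc_smooth = smooth_curve(val_acc)
loss_smooth = smooth_curve(loss)
val_loss_smooth = smooth_curve(val_loss)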


plt.plot(epochs, acc, color='red', marker='o', label='Training acc')
plt.plot(epochs, val_acc, color='blue', marker='d', label='Validation acc')
plt.annotate('精度{0}'.format(np.max(val_acc)), xy=(np.argmax(val_acc), np.max(val_acc)),
             arrowprops=dict(facecolor="orange", shrink=0.05), fontsize=10, color='black')
# plt.title('矩阵图')
plt.xlabel('迭代次数/次', verticalalignment='top', fontsize=12)
plt.ylabel('精度/%', horizontalalignment='right', fontsize=12)
plt.legend()

plt.savefig(r'C:\Users\10133\Desktop\bishe\matlab\分类准确率.jpg')
plt.figure()

plt.plot(epochs, loss, color='red', marker='o', label='Training loss')
plt.plot(epochs, val_loss, color='blue', marker='p', label='Validation loss')
# plt.title('矩阵图')
plt.xlabel('迭代次数/次', verticalalignment='top', fontsize=12)
plt.ylabel('损失/%', horizontalalignment='right', fontsize=12)
plt.legend()
plt.savefig(r'C:\Users\10133\Desktop\bishe\matlab\分类损失率.jpg')

plt.show()

# In[7]:

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

# store the training metrics of each epoch
steps = list(range(1, len(acc) + 1))  # epoch indices 1..N

import xlwt

# create the workbook (note: the worksheet did not always seem to show up when this was run)
book = xlwt.Workbook()

# create the worksheet
sheet1 = book.add_sheet(u'sheet1', cell_overwrite_ok=True)

for i in range(len(acc)):  # one row per epoch (the name "epochs" was reused above for the x-axis range)
    sheet1.write(i, 0, steps[i])
    sheet1.write(i, 1, val_acc[i])
    sheet1.write(i, 2, acc[i])
    sheet1.write(i, 3, loss[i])
    sheet1.write(i, 4, val_loss[i])
    # sheet1.write(11,1,str(loss_test))
    # sheet1.write(11,2,str(acc_test))
# save the workbook to disk
book.save(r'C:\Users\10133\Desktop\bishe\matlab\分类训练值.xls')
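
# Alternative export (assumes pandas and an xlsx engine such as openpyxl are installed):
# xlwt can only write the legacy .xls format, whereas pandas writes .xlsx with less code.
import pandas as pd

df = pd.DataFrame({
    'epoch': steps,
    'val_acc': val_acc,
    'acc': acc,
    'loss': loss,
    'val_loss': val_loss,
})
df.to_excel(r'C:\Users\10133\Desktop\bishe\matlab\分类训练值.xlsx', index=False)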
