Keras: from model.train_on_batch() to model.fit()
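
The original script drives training by hand: it calls model.train_on_batch() in a Python loop, prints the train/test losses itself, and saves weights whenever the test loss improves. The rewrite further below replaces that loop with a single model.fit() call plus EarlyStopping, ReduceLROnPlateau, and ModelCheckpoint callbacks, which cover the same logging, learning-rate scheduling, and checkpointing.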

Original code

'''Essentially the final version; decent predictions, loss < 1'''

import csv
from functools import lru_cache
import numpy as np
import matplotlib.pyplot as plt

import keras
from keras import backend as K
from keras.layers import Layer
from keras.layers import Dense
from keras.models import Sequential
from keras.layers import Activation

from pylab import *
from mpl_toolkits.mplot3d import Axes3D

'''Set the save path for model files'''
path = './model_save/4D/Use5/'


'''Load the csv files (returned as ndarrays)'''
# Training set
csv_train = np.loadtxt(open("./train_last.csv","rb"),delimiter=",",skiprows=0) 
print('train.csv loaded!')
# Test set
csv_test = np.loadtxt(open("./test_last.csv","rb"),delimiter=",",skiprows=0) 
print('test.csv loaded!')


'''Select the input vectors'''
# Training set: the first three columns are features, the fourth is the target
X_train = csv_train[:,0:3]# [:,np.newaxis]
Y_train = csv_train[:,3]
# Test set
X_test = csv_test[:,0:3]
Y_test = csv_test[:,3]
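# Resulting shapes: X_* is (n_samples, 3); Y_* is (n_samples,)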


# Define a custom RBF (radial basis function) layer
class RBFLayer(Layer):
    def __init__(self, units, gamma, **kwargs):
        super(RBFLayer, self).__init__(**kwargs)
        self.units = units
        self.gamma = K.cast_to_floatx(gamma)

    def build(self, input_shape):
        self.mu = self.add_weight(name='mu',
                                  shape=(int(input_shape[1]), self.units),
                                  initializer='uniform',
                                  trainable=True)
        super(RBFLayer, self).build(input_shape)

    def call(self, inputs):
        diff = K.expand_dims(inputs) - self.mu
        l2 = K.sum(K.pow(diff,2), axis=1)
        res = K.exp(-1 * self.gamma * l2)
        return res

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.units)
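
# Note: RBFLayer is defined here but never used in the network below. A minimal,
# hypothetical usage sketch (10 units and gamma=0.5 are illustrative values,
# not from the original):
# rbf_model = Sequential()
# rbf_model.add(RBFLayer(10, 0.5, input_shape=(3,)))
# rbf_model.add(Dense(1))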



'''Build the network'''
'''Glorot normal initializer, also known as the Xavier normal initializer.
It draws samples from a truncated normal distribution centered on 0 with
stddev = sqrt(2 / (fan_in + fan_out)), where fan_in is the number of input units
in the weight tensor and fan_out is the number of output units.'''
## keras.initializers.glorot_normal(seed=None) 
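# A minimal sketch of plugging it into a layer (the model below keeps the
# default initializer; this line is illustrative only):
# model.add(Dense(8, input_dim=3, kernel_initializer=keras.initializers.glorot_normal(seed=None)))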

model = Sequential()

# Layer 1
model.add(Dense(units = 3, input_dim = 3))
model.add(keras.layers.normalization.BatchNormalization())
# model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 8, input_dim = 3))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 16, input_dim = 8))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 32, input_dim = 16))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 64, input_dim = 32))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 128, input_dim = 64))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 256, input_dim = 128))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(keras.layers.Dropout(0.9))
model.add(Dense(units = 512, input_dim = 256))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(keras.layers.Dropout(0.9))
model.add(Dense(units = 1024, input_dim = 512))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

# model.add(keras.layers.Dropout(0.9))
# model.add(Dense(units = 1024, input_dim = 1024))
# model.add(keras.layers.normalization.BatchNormalization())
# model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

# model.add(keras.layers.Dropout(0.9))
# model.add(Dense(units = 1024, input_dim = 1024))
# model.add(keras.layers.normalization.BatchNormalization())
# model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

# model.add(keras.layers.Dropout(0.9))
# model.add(Dense(units = 1024, input_dim = 1024))
# model.add(keras.layers.normalization.BatchNormalization())
# model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(keras.layers.Dropout(0.9))
model.add(Dense(units = 1024, input_dim = 1024))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(keras.layers.Dropout(0.9))
model.add(Dense(units = 512, input_dim = 1024)) 
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 256, input_dim = 512))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 128, input_dim = 256))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 64, input_dim = 128))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 32, input_dim = 64))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 16, input_dim = 32))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 8, input_dim = 16))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 4, input_dim = 8))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 2, input_dim = 4))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 1, use_bias=True))  # input_dim would be ignored here anyway (non-first layer)
     
# Display the architecture
model.summary()
'''
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_1 (Dense)              (None, 3)                 12
_________________________________________________________________
batch_normalization_1 (Batch (None, 3)                 12
_________________________________________________________________
dense_2 (Dense)              (None, 8)                 32
_________________________________________________________________
batch_normalization_2 (Batch (None, 8)                 32
_________________________________________________________________
activation_1 (Activation)    (None, 8)                 0
_________________________________________________________________
dense_3 (Dense)              (None, 16)                144
_________________________________________________________________
batch_normalization_3 (Batch (None, 16)                64
_________________________________________________________________
activation_2 (Activation)    (None, 16)                0
_________________________________________________________________
dense_4 (Dense)              (None, 32)                544
_________________________________________________________________
batch_normalization_4 (Batch (None, 32)                128
_________________________________________________________________
activation_3 (Activation)    (None, 32)                0
_________________________________________________________________
dense_5 (Dense)              (None, 64)                2112
_________________________________________________________________
batch_normalization_5 (Batch (None, 64)                256
_________________________________________________________________
activation_4 (Activation)    (None, 64)                0
_________________________________________________________________
dense_6 (Dense)              (None, 128)               8320
_________________________________________________________________
batch_normalization_6 (Batch (None, 128)               512
_________________________________________________________________
activation_5 (Activation)    (None, 128)               0
_________________________________________________________________
dense_7 (Dense)              (None, 256)               33024
_________________________________________________________________
batch_normalization_7 (Batch (None, 256)               1024      
_________________________________________________________________
activation_6 (Activation)    (None, 256)               0
_________________________________________________________________
dropout_1 (Dropout)          (None, 256)               0
_________________________________________________________________
dense_8 (Dense)              (None, 512)               131584
_________________________________________________________________
batch_normalization_8 (Batch (None, 512)               2048
_________________________________________________________________
activation_7 (Activation)    (None, 512)               0
_________________________________________________________________
dropout_2 (Dropout)          (None, 512)               0
_________________________________________________________________
dense_9 (Dense)              (None, 1024)              525312
_________________________________________________________________
batch_normalization_9 (Batch (None, 1024)              4096
_________________________________________________________________
activation_8 (Activation)    (None, 1024)              0
_________________________________________________________________
dropout_3 (Dropout)          (None, 1024)              0
_________________________________________________________________
dense_10 (Dense)             (None, 1024)              1049600
_________________________________________________________________
batch_normalization_10 (Batc (None, 1024)              4096
_________________________________________________________________
activation_9 (Activation)    (None, 1024)              0
_________________________________________________________________
dropout_4 (Dropout)          (None, 1024)              0
_________________________________________________________________
dense_11 (Dense)             (None, 512)               524800
_________________________________________________________________
batch_normalization_11 (Batc (None, 512)               2048
_________________________________________________________________
activation_10 (Activation)   (None, 512)               0
_________________________________________________________________
dense_12 (Dense)             (None, 256)               131328
_________________________________________________________________
batch_normalization_12 (Batc (None, 256)               1024
_________________________________________________________________
activation_11 (Activation)   (None, 256)               0
_________________________________________________________________
dense_13 (Dense)             (None, 128)               32896
_________________________________________________________________
batch_normalization_13 (Batc (None, 128)               512
_________________________________________________________________
activation_12 (Activation)   (None, 128)               0
_________________________________________________________________
dense_14 (Dense)             (None, 64)                8256
_________________________________________________________________
batch_normalization_14 (Batc (None, 64)                256
_________________________________________________________________
activation_13 (Activation)   (None, 64)                0
_________________________________________________________________
dense_15 (Dense)             (None, 32)                2080
_________________________________________________________________
batch_normalization_15 (Batc (None, 32)                128       
_________________________________________________________________
activation_14 (Activation)   (None, 32)                0
_________________________________________________________________
dense_16 (Dense)             (None, 16)                528
_________________________________________________________________
batch_normalization_16 (Batc (None, 16)                64
_________________________________________________________________
activation_15 (Activation)   (None, 16)                0
_________________________________________________________________
dense_17 (Dense)             (None, 8)                 136
_________________________________________________________________
batch_normalization_17 (Batc (None, 8)                 32
_________________________________________________________________
activation_16 (Activation)   (None, 8)                 0
_________________________________________________________________
dense_18 (Dense)             (None, 4)                 36
_________________________________________________________________
batch_normalization_18 (Batc (None, 4)                 16
_________________________________________________________________
activation_17 (Activation)   (None, 4)                 0
_________________________________________________________________
dense_19 (Dense)             (None, 2)                 10
_________________________________________________________________
batch_normalization_19 (Batc (None, 2)                 8
_________________________________________________________________
activation_18 (Activation)   (None, 2)                 0
_________________________________________________________________
dense_20 (Dense)             (None, 1)                 3
=================================================================
Total params: 2,467,113
Trainable params: 2,458,935
Non-trainable params: 8,178
_________________________________________________________________
'''

# Choose the optimizer
Adagrad = keras.optimizers.Adagrad(lr=0.1, epsilon=None, decay=0.0)

# Set the optimizer and loss function
model.compile(optimizer=Adagrad, loss='mse')

# Load previously saved weights (optional; kept commented out)
# model.load_weights(path + 'step=98400&loss=1.9879720564241763.h5')

'''Training: one full-batch train_on_batch step per iteration'''
less = 100  # best (lowest) test loss seen so far

for step in range(200001):

    # One gradient update on the entire training set, passed as a single batch
    loss_train = model.train_on_batch(X_train, Y_train)
    loss_test = model.evaluate(X_test, Y_test, verbose=0)

    # print(model.evaluate(X_test,Y_test))
    if step % 100 == 0:
        print("The step is ", step, "..............." + '[loss_train]:', loss_train)
        print("The step is ", step, "..............................................." + '[loss_test]:', loss_test)

    # Manual checkpointing: after a warm-up, save whenever the test loss improves
    if step > 10000 and loss_test < less:
        less = loss_test
        save_path = path + 'step=' + str(step) + '&loss=' + str(loss_test) + '.h5'
        model.save_weights(save_path)
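
# Everything in the loop above is what model.fit() will replace in the rewrite
# below: the periodic printing becomes fit's per-epoch logging, and the
# save-on-improvement branch becomes a ModelCheckpoint callback.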


'''Save the model (final weights for this run)'''
save_path = path + 'loss=' + str(loss_test) + '.h5'
model.save_weights(save_path)



# =================================================================================================================================
# =================================================================================================================================

# Result log
'''

'''

After the replacement
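
Compared with the original: LeakyReLU is re-declared so that each instance carries a __name__ attribute (needed when wrapping it in Activation(...)), the hand-written training loop is dropped, and training runs through model.fit() with EarlyStopping, ReduceLROnPlateau, and ModelCheckpoint callbacks handling early stopping, learning-rate decay, and checkpointing.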

'''Modified on the basis of Use3; training now uses model.fit()'''

import csv
from functools import lru_cache
import numpy as np
import matplotlib.pyplot as plt

import keras
from keras import backend as K
from keras.layers import Layer
from keras.layers import Dense
from keras.models import Sequential
from keras.layers import Activation

from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint

from pylab import *
from mpl_toolkits.mplot3d import Axes3D


# Re-declare LeakyReLU with a __name__ attribute
from keras.layers.advanced_activations import LeakyReLU
class LeakyReLU(LeakyReLU):
    def __init__(self, **kwargs):
        self.__name__ = "LeakyReLU"
        super(LeakyReLU, self).__init__(**kwargs)
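# Setting __name__ lets the instance stand in for a named activation function
# (e.g. inside Activation(...)), which Keras serializes by name when saving;
# a bare LeakyReLU layer instance has no __name__ attribute.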

leakyrelu_alpha = 0.3

'''Set the save path for model files'''
path = './model_save/4D/Use5/'


'''Load the csv files (returned as ndarrays)'''
# Training set
csv_train = np.loadtxt(open("./train_last.csv","rb"),delimiter=",",skiprows=0) 
print('train.csv loaded!')
# Test set
csv_test = np.loadtxt(open("./test_last.csv","rb"),delimiter=",",skiprows=0) 
print('test.csv loaded!')


'''Select the input vectors'''
# Training set: the first three columns are features, the fourth is the target
X_train = csv_train[:,0:3]# [:,np.newaxis]
Y_train = csv_train[:,3]
# Test set
X_test = csv_test[:,0:3]
Y_test = csv_test[:,3]


# Define a custom RBF (radial basis function) layer
class RBFLayer(Layer):
    def __init__(self, units, gamma, **kwargs):
        super(RBFLayer, self).__init__(**kwargs)
        self.units = units
        self.gamma = K.cast_to_floatx(gamma)

    def build(self, input_shape):
        self.mu = self.add_weight(name='mu',
                                  shape=(int(input_shape[1]), self.units),
                                  initializer='uniform',
                                  trainable=True)
        super(RBFLayer, self).build(input_shape)

    def call(self, inputs):
        diff = K.expand_dims(inputs) - self.mu
        l2 = K.sum(K.pow(diff,2), axis=1)
        res = K.exp(-1 * self.gamma * l2)
        return res

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.units)



'''Build the network'''
'''Glorot normal initializer, also known as the Xavier normal initializer.
It draws samples from a truncated normal distribution centered on 0 with
stddev = sqrt(2 / (fan_in + fan_out)), where fan_in is the number of input units
in the weight tensor and fan_out is the number of output units.'''
## keras.initializers.glorot_normal(seed=None) 

model = Sequential()

# Layer 1
model.add(Dense(units = 3, input_dim = 3))
model.add(keras.layers.normalization.BatchNormalization())
# model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(Dense(units = 8, input_dim = 3))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 16, input_dim = 8))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 32, input_dim = 16))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 64, input_dim = 32))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 128, input_dim = 64))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 256, input_dim = 128))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(keras.layers.Dropout(0.9))
model.add(Dense(units = 512, input_dim = 256))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(keras.layers.Dropout(0.9))
model.add(Dense(units = 1024, input_dim = 512))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

# model.add(keras.layers.Dropout(0.9))
# model.add(Dense(units = 1024, input_dim = 1024))
# model.add(keras.layers.normalization.BatchNormalization())
# model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

# model.add(keras.layers.Dropout(0.9))
# model.add(Dense(units = 1024, input_dim = 1024))
# model.add(keras.layers.normalization.BatchNormalization())
# model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

# model.add(keras.layers.Dropout(0.9))
# model.add(Dense(units = 1024, input_dim = 1024))
# model.add(keras.layers.normalization.BatchNormalization())
# model.add(Activation(keras.layers.advanced_activations.LeakyReLU(alpha=0.3)))

model.add(keras.layers.Dropout(0.9))
model.add(Dense(units = 1024, input_dim = 1024))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(keras.layers.Dropout(0.9))
model.add(Dense(units = 512, input_dim = 1024)) 
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 256, input_dim = 512))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 128, input_dim = 256))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 64, input_dim = 128))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 32, input_dim = 64))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 16, input_dim = 32))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 8, input_dim = 16))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 4, input_dim = 8))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 2, input_dim = 4))
model.add(keras.layers.normalization.BatchNormalization())
model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))

model.add(Dense(units = 1, use_bias=True))  # input_dim would be ignored here anyway (non-first layer)
     
# Display the architecture
model.summary()
'''
(model.summary() output is identical to the original version above.)
Total params: 2,467,113
Trainable params: 2,458,935
Non-trainable params: 8,178
'''

# Choose the optimizer
Adagrad = keras.optimizers.Adagrad(lr=0.1, epsilon=None, decay=0.0)

# Set the optimizer and loss function
model.compile(optimizer=Adagrad, loss='mse')

# Load the weights saved by the ModelCheckpoint callback in a previous run
# (comment this out on the first run, before weights.hdf5 exists)
model.load_weights(path + 'weights.hdf5')

'''Training'''
# The manual train_on_batch loop from the original version is gone; model.fit()
# below takes over its logging (verbose output) and its save-on-improvement
# logic (ModelCheckpoint).

# Stop if training no longer improves
EarlyStop=EarlyStopping(monitor='val_loss',  # with loss='mse', the logged metric is 'val_loss', not 'mse'
                        patience=2,
                        verbose=1, 
                        mode='auto')
# Reduce the learning rate on a plateau
Reduce=ReduceLROnPlateau(monitor='val_loss',
                         factor=0.1,
                         patience=1,
                         verbose=1,
                         mode='auto',
                         epsilon=0.0001,
                         cooldown=0,
                         min_lr=0)
# Save the best model seen during training
checkpointer = ModelCheckpoint(filepath = path + 'weights.hdf5', 
                               verbose=1, 
                               save_best_only=True)

# Fit the model
history=model.fit(X_train, Y_train, 
                  validation_data=(X_test, Y_test),
                  epochs=200, batch_size=200,
                  callbacks=[EarlyStop, Reduce, checkpointer], 
                  verbose=2)
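
# model.fit() returns a History object; a minimal sketch for plotting the loss
# curves (kept commented out; 'loss'/'val_loss' are the keys Keras logs when
# validation_data is given):
# plt.plot(history.history['loss'], label='train loss')
# plt.plot(history.history['val_loss'], label='test loss')
# plt.legend()
# plt.show()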

# Final evaluation of the model; with only loss='mse' compiled, evaluate()
# returns a single scalar (the test MSE)
scores = model.evaluate(X_test, Y_test, verbose=1)
# print("Baseline Error: %.2f%%" % (100-scores[1]*100))
print(scores)


'''Save the model (final weights for this run)'''
save_path = path + 'loss=' + str(scores) + '.h5'
model.save_weights(save_path)



# =================================================================================================================================
# =================================================================================================================================

# Result log
'''

'''