Original code
'''Basically the final version; decent predictions, loss < 1'''
import numpy as np
import keras
from keras import backend as K
from keras.layers import Layer, Dense, Activation, BatchNormalization, Dropout
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Sequential
'''Output/checkpoint directory'''
path = './model_save/4D/Use5/'
'''Load the CSV files (np.loadtxt returns an ndarray)'''
csv_train = np.loadtxt(open("./train_last.csv", "rb"), delimiter=",", skiprows=0)
print('train.csv loaded!')
csv_test = np.loadtxt(open("./test_last.csv", "rb"), delimiter=",", skiprows=0)
print('test.csv loaded!')
'''Split into input features (first three columns) and target (fourth column)'''
X_train = csv_train[:, 0:3]
Y_train = csv_train[:, 3]
X_test = csv_test[:, 0:3]
Y_test = csv_test[:, 3]
class RBFLayer(Layer):
    '''Gaussian RBF layer (defined here but never added to the model below).'''
    def __init__(self, units, gamma, **kwargs):
        super(RBFLayer, self).__init__(**kwargs)
        self.units = units
        self.gamma = K.cast_to_floatx(gamma)

    def build(self, input_shape):
        # One trainable center per unit, shape (input_dim, units).
        self.mu = self.add_weight(name='mu',
                                  shape=(int(input_shape[1]), self.units),
                                  initializer='uniform',
                                  trainable=True)
        super(RBFLayer, self).build(input_shape)

    def call(self, inputs):
        # Squared L2 distance to each center, then a Gaussian kernel.
        diff = K.expand_dims(inputs) - self.mu
        l2 = K.sum(K.pow(diff, 2), axis=1)
        return K.exp(-1 * self.gamma * l2)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.units)
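'''The RBF layer above is never used in this script. A minimal sketch of how
it could be wired into a Sequential model (the unit count and gamma below are
illustrative assumptions, not values from the original):'''
# rbf_demo = Sequential()
# rbf_demo.add(RBFLayer(10, gamma=0.5, input_shape=(3,)))
# rbf_demo.add(Dense(1))
# rbf_demo.compile(optimizer='adam', loss='mse')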
'''Build the network'''
'''Reference note: the Glorot normal initializer (a.k.a. Xavier normal) draws
samples from a truncated normal distribution centered on 0 with
stddev = sqrt(2 / (fan_in + fan_out)), where fan_in and fan_out are the
numbers of input and output units in the weight tensor.'''
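'''Note that the script never passes an initializer explicitly, so Dense falls
back to its Keras 2 default (glorot_uniform). A minimal sketch (not in the
original) of actually requesting the Glorot normal initializer described above:'''
# model.add(Dense(units=8, kernel_initializer='glorot_normal'))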
# Symmetric MLP: 3 -> 8 -> 16 -> ... -> 1024 -> 1024 -> ... -> 4 -> 2 -> 1,
# with BatchNorm after every Dense layer, LeakyReLU activations, and
# Dropout(0.9) around the four widest blocks. Only the first layer needs
# input_dim; Keras infers the input shape of every later layer.
model = Sequential()
model.add(Dense(units=3, input_dim=3))
model.add(BatchNormalization())
# (units, apply_dropout) for each hidden block
hidden_blocks = [(8, False), (16, False), (32, False), (64, False),
                 (128, False), (256, True), (512, True), (1024, True),
                 (1024, True), (512, False), (256, False), (128, False),
                 (64, False), (32, False), (16, False), (8, False),
                 (4, False), (2, False)]
for units, apply_dropout in hidden_blocks:
    model.add(Dense(units=units))
    model.add(BatchNormalization())
    model.add(Activation(LeakyReLU(alpha=0.3)))
    if apply_dropout:
        model.add(Dropout(0.9))
model.add(Dense(units=1, use_bias=True))
model.summary()
'''
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_1 (Dense) (None, 3) 12
_________________________________________________________________
batch_normalization_1 (Batch (None, 3) 12
_________________________________________________________________
dense_2 (Dense) (None, 8) 32
_________________________________________________________________
batch_normalization_2 (Batch (None, 8) 32
_________________________________________________________________
activation_1 (Activation) (None, 8) 0
_________________________________________________________________
dense_3 (Dense) (None, 16) 144
_________________________________________________________________
batch_normalization_3 (Batch (None, 16) 64
_________________________________________________________________
activation_2 (Activation) (None, 16) 0
_________________________________________________________________
dense_4 (Dense) (None, 32) 544
_________________________________________________________________
batch_normalization_4 (Batch (None, 32) 128
_________________________________________________________________
activation_3 (Activation) (None, 32) 0
_________________________________________________________________
dense_5 (Dense) (None, 64) 2112
_________________________________________________________________
batch_normalization_5 (Batch (None, 64) 256
_________________________________________________________________
activation_4 (Activation) (None, 64) 0
_________________________________________________________________
dense_6 (Dense) (None, 128) 8320
_________________________________________________________________
batch_normalization_6 (Batch (None, 128) 512
_________________________________________________________________
activation_5 (Activation) (None, 128) 0
_________________________________________________________________
dense_7 (Dense) (None, 256) 33024
_________________________________________________________________
batch_normalization_7 (Batch (None, 256) 1024
_________________________________________________________________
activation_6 (Activation) (None, 256) 0
_________________________________________________________________
dropout_1 (Dropout) (None, 256) 0
_________________________________________________________________
dense_8 (Dense) (None, 512) 131584
_________________________________________________________________
batch_normalization_8 (Batch (None, 512) 2048
_________________________________________________________________
activation_7 (Activation) (None, 512) 0
_________________________________________________________________
dropout_2 (Dropout) (None, 512) 0
_________________________________________________________________
dense_9 (Dense) (None, 1024) 525312
_________________________________________________________________
batch_normalization_9 (Batch (None, 1024) 4096
_________________________________________________________________
activation_8 (Activation) (None, 1024) 0
_________________________________________________________________
dropout_3 (Dropout) (None, 1024) 0
_________________________________________________________________
dense_10 (Dense) (None, 1024) 1049600
_________________________________________________________________
batch_normalization_10 (Batc (None, 1024) 4096
_________________________________________________________________
activation_9 (Activation) (None, 1024) 0
_________________________________________________________________
dropout_4 (Dropout) (None, 1024) 0
_________________________________________________________________
dense_11 (Dense) (None, 512) 524800
_________________________________________________________________
batch_normalization_11 (Batc (None, 512) 2048
_________________________________________________________________
activation_10 (Activation) (None, 512) 0
_________________________________________________________________
dense_12 (Dense) (None, 256) 131328
_________________________________________________________________
batch_normalization_12 (Batc (None, 256) 1024
_________________________________________________________________
activation_11 (Activation) (None, 256) 0
_________________________________________________________________
dense_13 (Dense) (None, 128) 32896
_________________________________________________________________
batch_normalization_13 (Batc (None, 128) 512
_________________________________________________________________
activation_12 (Activation) (None, 128) 0
_________________________________________________________________
dense_14 (Dense) (None, 64) 8256
_________________________________________________________________
batch_normalization_14 (Batc (None, 64) 256
_________________________________________________________________
activation_13 (Activation) (None, 64) 0
_________________________________________________________________
dense_15 (Dense) (None, 32) 2080
_________________________________________________________________
batch_normalization_15 (Batc (None, 32) 128
_________________________________________________________________
activation_14 (Activation) (None, 32) 0
_________________________________________________________________
dense_16 (Dense) (None, 16) 528
_________________________________________________________________
batch_normalization_16 (Batc (None, 16) 64
_________________________________________________________________
activation_15 (Activation) (None, 16) 0
_________________________________________________________________
dense_17 (Dense) (None, 8) 136
_________________________________________________________________
batch_normalization_17 (Batc (None, 8) 32
_________________________________________________________________
activation_16 (Activation) (None, 8) 0
_________________________________________________________________
dense_18 (Dense) (None, 4) 36
_________________________________________________________________
batch_normalization_18 (Batc (None, 4) 16
_________________________________________________________________
activation_17 (Activation) (None, 4) 0
_________________________________________________________________
dense_19 (Dense) (None, 2) 10
_________________________________________________________________
batch_normalization_19 (Batc (None, 2) 8
_________________________________________________________________
activation_18 (Activation) (None, 2) 0
_________________________________________________________________
dense_20 (Dense) (None, 1) 3
=================================================================
Total params: 2,467,113
Trainable params: 2,458,935
Non-trainable params: 8,178
_________________________________________________________________
'''
adagrad = keras.optimizers.Adagrad(lr=0.1, epsilon=None, decay=0.0)
model.compile(optimizer=adagrad, loss='mse')
'''Training: one full-batch gradient step per iteration, checkpointing
whenever the test loss improves after a 10000-step warm-up. (Note this
evaluates on the test set every step, which dominates the runtime.)'''
best_loss = 100
for step in range(200001):
    loss_train = model.train_on_batch(X_train, Y_train)
    loss_test = model.evaluate(X_test, Y_test, verbose=0)
    if step % 100 == 0:
        print("The step is ", step, "..............." + '[loss_train]:', loss_train)
        print("The step is ", step, "..............................................." + '[loss_test]:', loss_test)
    if step > 10000 and loss_test < best_loss:
        best_loss = loss_test
        save_path = path + 'step=' + str(step) + '&loss=' + str(loss_test) + '.h5'
        model.save_weights(save_path)
'''Save the final weights for this run'''
save_path = path + 'loss=' + str(loss_test) + '.h5'
model.save_weights(save_path)
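'''A hedged sketch (not in the original) of reloading the saved weights for
inference later; the same architecture must be rebuilt identically first:'''
# model.load_weights(save_path)
# predictions = model.predict(X_test)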
Replacement code
'''Modified from Use3, and trained with model.fit() instead of a manual loop'''
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras import backend as K
from keras.layers import Layer, Dense, Activation, BatchNormalization, Dropout
from keras.models import Sequential
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras.layers.advanced_activations import LeakyReLU
'''Subclass LeakyReLU so each instance carries a __name__ attribute:
Activation() serializes its activation by name, and plain LeakyReLU layer
instances have none, which breaks model serialization (e.g. when
ModelCheckpoint saves the full model).'''
class LeakyReLU(LeakyReLU):
    def __init__(self, **kwargs):
        self.__name__ = "LeakyReLU"
        super(LeakyReLU, self).__init__(**kwargs)

leakyrelu_alpha = 0.3
'''Output/checkpoint directory'''
path = './model_save/4D/Use5/'
'''Load the CSV files (np.loadtxt returns an ndarray)'''
csv_train = np.loadtxt(open("./train_last.csv", "rb"), delimiter=",", skiprows=0)
print('train.csv loaded!')
csv_test = np.loadtxt(open("./test_last.csv", "rb"), delimiter=",", skiprows=0)
print('test.csv loaded!')
'''Split into input features (first three columns) and target (fourth column)'''
X_train = csv_train[:, 0:3]
Y_train = csv_train[:, 3]
X_test = csv_test[:, 0:3]
Y_test = csv_test[:, 3]
class RBFLayer(Layer):
    '''Gaussian RBF layer (as above; still unused in this model).'''
    def __init__(self, units, gamma, **kwargs):
        super(RBFLayer, self).__init__(**kwargs)
        self.units = units
        self.gamma = K.cast_to_floatx(gamma)

    def build(self, input_shape):
        self.mu = self.add_weight(name='mu',
                                  shape=(int(input_shape[1]), self.units),
                                  initializer='uniform',
                                  trainable=True)
        super(RBFLayer, self).build(input_shape)

    def call(self, inputs):
        diff = K.expand_dims(inputs) - self.mu
        l2 = K.sum(K.pow(diff, 2), axis=1)
        return K.exp(-1 * self.gamma * l2)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.units)
'''Build the network: identical architecture to the original version
(see the Glorot initializer note there).'''
model = Sequential()
model.add(Dense(units=3, input_dim=3))
model.add(BatchNormalization())
# (units, apply_dropout) for each hidden block
hidden_blocks = [(8, False), (16, False), (32, False), (64, False),
                 (128, False), (256, True), (512, True), (1024, True),
                 (1024, True), (512, False), (256, False), (128, False),
                 (64, False), (32, False), (16, False), (8, False),
                 (4, False), (2, False)]
for units, apply_dropout in hidden_blocks:
    model.add(Dense(units=units))
    model.add(BatchNormalization())
    model.add(Activation(LeakyReLU(alpha=leakyrelu_alpha)))
    if apply_dropout:
        model.add(Dropout(0.9))
model.add(Dense(units=1, use_bias=True))
model.summary()
'''
model.summary() output is identical to the original version above
(Total params: 2,467,113; Trainable: 2,458,935; Non-trainable: 8,178).
'''
adagrad = keras.optimizers.Adagrad(lr=0.1, epsilon=None, decay=0.0)
model.compile(optimizer=adagrad, loss='mse')
# Warm-start from the checkpoint written by an earlier run.
model.load_weights(path + 'weights.hdf5')
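'''Caveat: on a first run the checkpoint file does not exist yet and
load_weights will raise. A hedged guard (not in the original) could be:'''
# import os
# if os.path.exists(path + 'weights.hdf5'):
#     model.load_weights(path + 'weights.hdf5')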
'''Training with callbacks: early stopping, LR reduction on plateau, and
checkpointing of the best weights. The model's MSE loss is logged as
'loss'/'val_loss', so the callbacks must monitor 'val_loss'; the original
monitor='mse' matches no logged metric and would never trigger.'''
early_stop = EarlyStopping(monitor='val_loss',
                           patience=2,
                           verbose=1,
                           mode='auto')
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.1,
                              patience=1,
                              verbose=1,
                              mode='auto',
                              epsilon=0.0001,
                              cooldown=0,
                              min_lr=0)
# ModelCheckpoint monitors 'val_loss' by default when save_best_only=True.
checkpointer = ModelCheckpoint(filepath=path + 'weights.hdf5',
                               verbose=1,
                               save_best_only=True)
history = model.fit(X_train, Y_train,
                    validation_data=(X_test, Y_test),
                    epochs=200, batch_size=200,
                    callbacks=[early_stop, reduce_lr, checkpointer],
                    verbose=2)
scores = model.evaluate(X_test, Y_test, verbose=1)
print(scores)
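'''A minimal sketch (not in the original) of visualizing the fit history with
the matplotlib import at the top; history.history holds the per-epoch losses:'''
# plt.plot(history.history['loss'], label='train')
# plt.plot(history.history['val_loss'], label='test')
# plt.xlabel('epoch'); plt.ylabel('mse'); plt.legend(); plt.show()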
'''Save the final weights for this run'''
save_path = path + 'loss=' + str(scores) + '.h5'
model.save_weights(save_path)