卷积神经网络进行个人信用评估

利用tensorflow的keras搭建了一个卷积神经网络,训练模型进行评估

懂行的人自己看
首先要将数据进行归一化处理,训练后的准确率可以达到70%,采用了一些已知的防止过拟合的方法,如BatchNormalization、Dropout、权值衰减等。

# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 16:06:20 2020

@author: dell
"""

import matplotlib.pyplot as plt
import numpy as np
import h5py

from sklearn import preprocessing

from keras.models import load_model
from keras import models
from keras import layers
from keras.utils.np_utils import to_categorical
from keras import optimizers
from keras import regularizers
from keras.constraints import maxnorm
# from keras.layers import Embedding, Flatten

# Load the credit-scoring dataset.
# NOTE(review): `load_data` is neither defined nor imported in this file — it is
# presumably supplied by a sibling module (h5py is imported above, so the data
# likely comes from an .h5 file); confirm before running.
(train_data, train_labels), (test_data, test_labels) = load_data()

# Drop feature column 1 from both splits so train/test stay aligned.
train_data = np.delete(train_data, 1, axis=1)
test_data = np.delete(test_data, 1, axis=1)

# 70/30 split of the training rows into train / validation.
# Derive the count from the data instead of hard-coding 42202.
percent = int(train_data.shape[0] * 0.7)

partial_train_data = train_data[:percent, :]
val_data = train_data[percent:, :]

# Each row holds 96 features; reshape into a 12x8 single-channel "image" so a
# Conv2D network can consume it.  -1 lets numpy infer the row count instead of
# hard-coding 29541 / 12661.
partial_train_data = partial_train_data.reshape((-1, 12, 8, 1))
val_data = val_data.reshape((-1, 12, 8, 1))

partial_train_labels = train_labels[:percent]
val_labels = train_labels[percent:]

# One-hot encode the binary labels (shape becomes (n, 2)).
partial_train_labels = to_categorical(partial_train_labels)
val_labels = to_categorical(val_labels)
test_labels = to_categorical(test_labels)


# CNN for binary credit scoring on 12x8x1 "feature images".
# Overfitting countermeasures: L2 weight decay, BatchNormalization, Dropout.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='Same',
                        input_shape=(12, 8, 1),
                        kernel_regularizer=regularizers.l2(0.002)))
# Use the public layers.BatchNormalization alias; the
# layers.normalization submodule path is an internal/deprecated location.
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(64, (3, 3), activation='relu',
                        kernel_regularizer=regularizers.l2(0.002)))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.BatchNormalization())

model.add(layers.Flatten())
model.add(layers.Dense(16, activation='relu',
                       kernel_regularizer=regularizers.l2(0.002)))
model.add(layers.Dropout(0.25))
# BUG FIX: the labels are one-hot encoded with to_categorical (shape (n, 2)),
# but the original head was Dense(1, sigmoid) + binary_crossentropy, which
# expects {0,1} labels of shape (n,) or (n, 1) and fails on a shape mismatch.
# A 2-unit softmax with categorical_crossentropy matches the one-hot targets.
model.add(layers.Dense(2, activation='softmax'))

model.summary()

model.compile(optimizer=optimizers.RMSprop(lr=0.001),  # `lr` kwarg matches this keras version
              loss="categorical_crossentropy",
              metrics=['accuracy'])

history = model.fit(partial_train_data,
                    partial_train_labels,
                    epochs=140,
                    batch_size=2000,
                    validation_data=(val_data, val_labels))

# Persist the trained model for the evaluation step below.
models.save_model(model, r"C:\Users\dell\Desktop\data\net_model1.h5")


# Plot training vs. validation loss per epoch.
history_dict = history.history
loss_values = history_dict["loss"]
val_loss_values = history_dict["val_loss"]

epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, "bo", label="Training loss")  # fixed "Traing" typo
plt.plot(epochs, val_loss_values, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
# BUG FIX: original was `plt.show` — a bare attribute access that never
# displays the figure; it must be called.
plt.show()

# Plot training vs. validation accuracy per epoch in a fresh figure.
plt.figure()
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title("Training and Validation accuracy")  # fixed "Traing" typo
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Evaluate the saved model on the held-out test set.
test_data = test_data.reshape((-1, 12, 8, 1))  # infer row count instead of hard-coding 6162
eva_model = load_model(r"C:\Users\dell\Desktop\data\net_model1.h5")
eva_model_test = eva_model.evaluate(test_data, test_labels)  # [loss, accuracy]
predictions = eva_model.predict(test_data)
# BUG FIX: to_categorical(predictions) on continuous probabilities truncates
# them to int (almost all become class 0) and is meaningless.  Threshold the
# predicted probabilities at 0.5 to obtain hard class labels instead.
prediction_labels = (predictions > 0.5).astype("int32")
# from keras.utils import plot_model
# plot_model(model, show_shapes=True, to_file=r'C:\Users\dell\Desktop\data\model.png')

下图是训练的过程:
在这里插入图片描述
在这里插入图片描述

欢迎留言,希望能给大家派上用场!

  • 5
    点赞
  • 12
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值