(1)下载并研究数据集背景和结构
(2)任选分类或者回归算法进行算法代码实现和结果分析
数据集分析
首先,由数据集文件的文件名SouthGermanCredit.asc
可以得知,该数据集是研究德国南部的信贷问题的,然后再由附加文件codetable.txt
可以得知数据集中各个参数代表的意思:
laufkont:该用户支票账户的状态
laufzeit:贷款的持续时间
moral:信用历史
verw:贷款的目的
hoehe:贷款金额
sparkont:存款
beszeit:工作时间
rate:分期付款率
famges:性别和目前状态
buerge:其他债务人
wohnzeit:现在的住宅
verm:资产
alter:年龄
weikred:其他分期付款计划
wohn:目前居住的房屋
bishkred:在本银行已有的信贷笔数
beruf:工作
pers:承担责任的人
telef:是否登记了电话
gastarb:是否为国外工作者
kredit:信用好坏
所以最后一列kredit
即为一行数据的结果
代码实现
代码如下:
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report
from sklearn.preprocessing import StandardScaler
# Binary-classification metric
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt

# Load the South German Credit data set: columns 1-20 are the features,
# column 21 (kredit) is the good/bad credit label.
df = pd.read_csv('./SouthGermanCredit.asc', sep=' ')
X = df.iloc[:, 0:20]  # the first 20 columns are feature values
y = df.iloc[:, 20]    # the 21st column is the label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)

mlp = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(80, 30, 20),
                    random_state=1, learning_rate_init=.2)

# Standardize the features; without scaling the test accuracy is only ~0.72.
# The scaler is fit on the training split only, then applied to both splits.
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)

mlp.fit(X_train_std, y_train)
predictions = mlp.predict(X_test_std)

# format(x) with a single argument is just str(x); print handles that itself.
print('Accuracy score: ', accuracy_score(y_test, predictions))
print('Precision score: ', precision_score(y_test, predictions))
print('Recall score: ', recall_score(y_test, predictions))
print('F1 score: ', f1_score(y_test, predictions))
print(classification_report(y_test, predictions))

# FIX(review): store the result under a new name so the imported
# confusion_matrix function is not shadowed by its own return value.
cm = confusion_matrix(y_test, predictions)
print(cm)
plt.matshow(cm)
plt.title("混淆矩阵", fontproperties="SimSun", size=18)
plt.colorbar()
plt.ylabel("真实值", fontproperties="SimSun", size=18)
plt.xlabel("预测值", fontproperties="SimSun", size=18)
plt.show()
结果如下:
可以看到,经过归一化处理后的数据,预测出来的精确率达到了0.8。
但是0.8的精确率还是太小了,所以这里我又进行了一些修改,加上了老师课上用到的sigmoid函数,在6-3.py代码上进行了一点修改,然后得到如下代码:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report
# 二元分类分类指标
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
def sigmoid(x):
    """Logistic sigmoid: map x (scalar or ndarray) into (0, 1)."""
    return 1.0 / (np.exp(-x) + 1.0)
def sigmoid_prime(y):
    """Sigmoid derivative expressed via its own output: f'(t) = f(t)(1 - f(t)).

    :param y: a value already produced by sigmoid()
    """
    one_minus = 1.0 - y
    return y * one_minus
class MLP:
    """A minimal multi-layer perceptron trained with online backpropagation.

    Weights are initialized uniformly in (-1, 1); a bias node is appended to
    the input layer and to every hidden layer.
    """

    def __init__(self, layers, activation='sigmoid'):
        """
        :param layers: network structure — list of node counts for the
                       input layer, hidden layer(s), and output layer
        :param activation: activation function name; only 'sigmoid' is supported
        :raises ValueError: for an unsupported activation name
        """
        if activation == 'sigmoid':
            self.activation = sigmoid
            self.activation_prime = sigmoid_prime
        else:
            # FIX(review): the original silently `pass`ed here, leaving
            # self.activation unset and surfacing later as a confusing
            # AttributeError inside fit(). Fail fast instead.
            raise ValueError("unsupported activation: %r" % (activation,))

        # Weight matrices, one per layer transition.
        self.weights = []
        # Input->hidden / hidden->hidden weights; the +1 adds the bias node.
        for i in range(1, len(layers) - 1):
            r = 2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1
            self.weights.append(r)
        # Output-layer weights. FIX(review): the original reused the leaked
        # loop variable `i` here, which is undefined when the loop body never
        # runs (a 2-layer network); use an explicit index instead.
        last = len(layers) - 2
        r = 2 * np.random.random((layers[last] + 1, layers[last + 1])) - 1
        self.weights.append(r)

    def fit(self, X, Y, learning_rate=1, epochs=10000):
        """Train by stochastic (one random sample per step) backpropagation.

        :param X: 2-D array of training samples, one per row
        :param Y: target values, indexable by integer position
        :param learning_rate: step size for the weight updates
        :param epochs: number of single-sample update steps
        """
        # Prepend a column of ones: the bias unit for the input layer.
        X = np.hstack([np.ones((X.shape[0], 1)), X])
        for k in range(epochs):
            # Pick one random sample index in [0, X.shape[0]).
            i = np.random.randint(X.shape[0], high=None)
            a = [X[i]]
            # Forward pass: each column of a weight matrix holds the
            # connections from all nodes of the previous layer into one node.
            for l in range(len(self.weights)):
                dot_value = np.dot(a[l], self.weights[l])
                a.append(self.activation(dot_value))
            # Backward pass: delta of the output layer first ...
            error = Y[i] - a[-1]
            deltas = [error * self.activation_prime(a[-1])]
            # ... then propagate back through the hidden layers.
            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_prime(a[l]))
            # Reorder deltas to input->output to match self.weights.
            deltas.reverse()
            # Weight update: gradient is the outer product of a layer's
            # input activations and its delta; one update per sample.
            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * np.dot(layer.T, delta)

    def predict(self, X):
        """Run the forward pass for every row of X.

        :returns: list of raw network outputs (sigmoid activations), one per row
        """
        tot = []
        for x in X:
            # Prepend the bias unit, then propagate layer by layer.
            a = np.concatenate((np.ones(1), np.array(x)))
            for l in range(len(self.weights)):
                a = self.activation(np.dot(a, self.weights[l]))
            tot.append(a)
        return tot
if __name__ == '__main__':
    # Columns 1-20 are features; column 21 (kredit) is the good/bad label.
    df = pd.read_csv('./SouthGermanCredit.asc', sep=' ')
    X = df.iloc[:, 0:20]
    y = df.iloc[:, 20]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)

    # Standardize features with statistics from the training split only.
    sc = StandardScaler()
    sc.fit(X_train)
    X_train_std = sc.transform(X_train)
    X_test_std = sc.transform(X_test)

    # Network structure: 20 inputs, one hidden layer of 10 nodes, 1 output.
    mlp = MLP([20, 10, 1])
    # FIX(review): the original trained on the standardized FULL data set
    # (test rows included) and then evaluated on the test split — data
    # leakage that inflates the reported score. Train on the training split
    # only. y_train is converted to a positional array because MLP.fit
    # indexes Y with a random integer, and the Series keeps pre-split labels.
    mlp.fit(X_train_std, y_train.to_numpy())
    predictions = mlp.predict(X_test_std)

    # The raw network outputs are sigmoid values in (0, 1), not 0/1 labels;
    # threshold at the midpoint 0.5 to obtain hard class predictions.
    predictions = [int(item > 0.5) for item in predictions]

    # format(x) with one argument is just str(x); print directly.
    print('Accuracy score: ', accuracy_score(y_test, predictions))
    print('Precision score: ', precision_score(y_test, predictions))
    print('Recall score: ', recall_score(y_test, predictions))
    print('F1 score: ', f1_score(y_test, predictions))
    print(classification_report(y_test, predictions))

    # FIX(review): do not shadow the imported confusion_matrix function.
    cm = confusion_matrix(y_test, predictions)
    print(cm)
    plt.matshow(cm)
    plt.title("混淆矩阵", fontproperties="SimSun", size=18)
    plt.colorbar()
    plt.ylabel("真实值", fontproperties="SimSun", size=18)
    plt.xlabel("预测值", fontproperties="SimSun", size=18)
    plt.show()
运行结果如下:
可以看到,修改后的结果精确率又有所提高,提高到了0.86。
注:在原有的代码上,我改动的地方主要在predict()函数上
def predict(self, X):
tot = []
for x in X:
a = np.concatenate((np.ones(1), np.array(x))) # a为输入向量(行向量)
for l in range(0, len(self.weights)): # 逐层计算输出
a = self.activation(np.dot(a, self.weights[l]))
tot.append(a)
return tot
这里我通过for循环将数据集的所有数据都进行预测,然后预测的结果放入一个列表中返回,然后再将该列表进行处理:
# 因为归一化以后的测试集数据预测得到的结果并不是0、1,因此这里需要手动设置一个阈值,
# 即超过这个阈值的值都转换成1,否则转换成0,而这里我设置的阈值为0.5,正好是中间值(四舍五入原则)
predictions = [int(item > 0.5) for item in predictions]