# A simple example of data correlation analysis with pandas and binary
# classification with logistic regression. The key points are the library
# function for splitting a dataset into training and test sets
# (train_test_split) and calling the LogisticRegression estimator directly.
# Running this in a Jupyter notebook environment is intuitive and convenient.
import pandas as pd
from sklearn.model_selection import train_test_split
# Build a tiny labeled dataset of 10 samples per feature:
# the label z is 1 when x + y > 50 and 0 when x + y < 50.
xs = [10, 20, 30, 20, 50, 100, 90, 10, 30, 25]   # feature x values
ys = [50, 10, 60, 20, 40, 10, -20, -20, 10, 24]  # feature y values
zs = [1, 0, 1, 0, 1, 1, 1, 0, 0, 0]              # binary labels
# Assemble the three lists into a DataFrame with columns x, y, z.
data_df = pd.DataFrame({"x": xs, "y": ys, "z": zs})
print(data_df)
# Pairwise feature correlation: values range from -1 to 1; 0 means uncorrelated.
correlation = data_df.corr()
print("相关系数矩阵:\n")
print(correlation)
# --- Split the dataset into training and test sets ---
features = data_df[["x", "y"]]  # x and y are the model inputs
target = data_df[["z"]]         # z is the model output
# Hold out 20% of the rows for testing (80% for training);
# random_state=10 is an arbitrary seed that makes the split reproducible.
X_train, X_test, Y_train, Y_test = train_test_split(
    features, target, test_size=0.2, random_state=10
)
for label, part in (("X_train", X_train), ("X_test", X_test),
                    ("Y_train", Y_train), ("Y_test", Y_test)):
    print(f"{label}=", part)
# --- Logistic regression ---
from sklearn.linear_model import LogisticRegression

# Train a logistic-regression classifier on the training split.
logreg = LogisticRegression()
# Fix: sklearn expects y as a 1-D array; passing the (n, 1) label DataFrame
# directly raises a DataConversionWarning, so flatten it with .values.ravel().
logreg.fit(X_train, Y_train.values.ravel())

# Predict labels for the held-out samples; cast to int for clean display.
pred_y = logreg.predict(X_test).astype(int)
print(pred_y)

# Mean accuracy on the test split, as a percentage rounded to 2 decimals.
acc_log = round(logreg.score(X_test, Y_test) * 100, 2)
print('acc_log: ', acc_log)

from sklearn.metrics import confusion_matrix
# Confusion matrix: rows are true labels, columns are predicted labels.
print('混淆矩阵: \n', confusion_matrix(Y_test, pred_y))