# Classification of the sklearn iris dataset with a logistic regression model.
# (BUG FIX: this header line was bare, uncommented text in the original,
# which is a SyntaxError and prevented the whole script from running.)
## import package
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# data prepare: load the iris dataset bundled with scikit-learn
from sklearn.datasets import load_iris

data = load_iris()
iris_target = data.target  # integer class labels (0, 1, 2)
# wrap the feature matrix in a DataFrame so columns carry their feature names
iris_features = pd.DataFrame(data=data.data, columns=data.feature_names)

# quick exploratory look at the data
iris_features.info()
print(iris_features.head())
print(iris_features.tail())
print(iris_target)
print(pd.Series(iris_target).value_counts())  # number of samples per class
print(iris_features.describe())

# combined frame (features + target column) used by the visualisation code below
iris_all = iris_features.copy()
iris_all['target'] = iris_target
# NOTE(review): the triple-quoted string below is visualisation code that has
# been deliberately disabled (pair plot, per-feature box plots, and a 3D
# scatter of the three classes). At runtime it is evaluated as a bare string
# literal and immediately discarded, i.e. a no-op. Remove the quotes to
# re-enable the plots.
'''
sns.pairplot(data = iris_all, diag_kind = 'hist', hue = 'target')
#sns.pairplot(data = iris_all, kind = 'reg', diag_kind = 'kde', hue = 'target')
plt.show()
for col in iris_features.columns:
sns.boxplot(x='target', y=col, saturation=0.5,palette='pastel', data=iris_all)
plt.title(col)
plt.show()
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(111, projection='3d')
# plt
iris_all_class0 = iris_all[iris_all['target']==0].values
iris_all_class1 = iris_all[iris_all['target']==1].values
iris_all_class2 = iris_all[iris_all['target']==2].values
# 'setosa'(0), 'versicolor'(1), 'virginica'(2)
ax.scatter(iris_all_class0[:,0], iris_all_class0[:,1], iris_all_class0[:,2],label='setosa')
ax.scatter(iris_all_class1[:,0], iris_all_class1[:,1], iris_all_class1[:,2],label='versicolor')
ax.scatter(iris_all_class2[:,0], iris_all_class2[:,1], iris_all_class2[:,2],label='virginica')
plt.legend()
plt.show()
'''
# model begin: binary logistic regression on the first two classes only
from sklearn.model_selection import train_test_split

# select class 0 and 1 (the first 100 rows of the dataset)
iris_features_part = iris_features.iloc[:100]
iris_target_part = iris_target[:100]

# train:test = 8:2, fixed seed for reproducibility
x_train, x_test, y_train, y_test = train_test_split(
    iris_features_part, iris_target_part, test_size=0.2, random_state=2020)

from sklearn.linear_model import LogisticRegression

clf = LogisticRegression(random_state=0, solver='lbfgs')
clf.fit(x_train, y_train)
print(clf)  # fit() returns the estimator itself, so this matches the original output
print('weight of w', clf.coef_)        # per-feature coefficients
print('weight of w0', clf.intercept_)  # bias / intercept term

train_predict = clf.predict(x_train)
test_predict = clf.predict(x_test)

from sklearn import metrics
# Distinguish train vs test in the message: the original printed the identical
# sentence twice, making the two accuracy numbers indistinguishable.
print('The train accuracy of LogisticRegression is:',
      metrics.accuracy_score(y_train, train_predict))
print('The test accuracy of LogisticRegression is:',
      metrics.accuracy_score(y_test, test_predict))

# BUG FIX: confusion_matrix expects (y_true, y_pred); the original passed the
# arguments swapped, which transposes the matrix relative to the axis labels
# on the heatmap below (rows = true labels, columns = predicted labels).
confusion_matrix_result = metrics.confusion_matrix(y_test, test_predict)
print('The confusion matrix result:\n', confusion_matrix_result)

plt.figure(figsize=(8, 6))
sns.heatmap(confusion_matrix_result, annot=True, cmap='Blues')
plt.xlabel('Predicted labels')  # columns of the confusion matrix
plt.ylabel('True labels')       # rows of the confusion matrix
plt.show()
# Refit on the full three-class problem using all 150 samples.
x_train, x_test, y_train, y_test = train_test_split(
    iris_features, iris_target, test_size=0.2, random_state=2020)

clf = LogisticRegression(random_state=0, solver='lbfgs')
clf.fit(x_train, y_train)
print(clf.coef_, clf.intercept_)  # one coefficient row / intercept per class

train_predict = clf.predict(x_train)
test_predict = clf.predict(x_test)
# class-membership probabilities (one column per class)
train_predict_proba = clf.predict_proba(x_train)
test_predict_proba = clf.predict_proba(x_test)
print('test predict proba:\n', test_predict_proba)

# Fixed the 'accuract' typo and distinguish train vs test in the message
# (the original printed the identical sentence for both numbers).
print('The train accuracy is:\n', metrics.accuracy_score(y_train, train_predict))
print('The test accuracy is:\n', metrics.accuracy_score(y_test, test_predict))

# BUG FIX: confusion_matrix expects (y_true, y_pred); the original swapped the
# arguments, transposing the matrix relative to the heatmap's axis labels.
confusion_matrix_result = metrics.confusion_matrix(y_test, test_predict)
print(confusion_matrix_result)

plt.figure(figsize=(8, 6))
sns.heatmap(confusion_matrix_result, annot=True, cmap='Blues')
plt.xlabel('Predicted labels')  # columns of the confusion matrix
plt.ylabel('True labels')       # rows of the confusion matrix
plt.show()