# Logistic regression (which, like linear regression, can overfit) has built-in regularization.
# Advantage: like ridge regression, it can mitigate overfitting.
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression, SGDRegressor, LogisticRegression
# LogisticRegressionCV performs built-in cross-validation; plain LogisticRegression is tuned via grid search.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, classification_report
# sklearn.metrics provides classification-model evaluation; recall is the metric of interest here.
from sklearn.linear_model import Ridge
import numpy as np
import pandas as pd
import joblib
def logis():
    """Predict breast-cancer malignancy with logistic regression.

    Loads the UCI Wisconsin breast-cancer dataset, drops rows with missing
    values, standardizes the nine cell-attribute features, fits a
    LogisticRegression classifier, and prints the accuracy plus a per-class
    precision/recall report.

    :return: None
    """
    # Column names for the raw UCI file (it ships without a header row).
    column = ['Sample code number', 'Clump Thickness', 'Unidormity of Cell Size',
              'Uniformity of Cell Shape', 'Marginal Adhesion',
              'Single Epithelial Cell Size', 'Bare Nuclel', 'Bland Chromatin',
              'Normal Nucleoli', 'Mitoses', 'Class']
    # Read the data directly from the UCI repository.
    data = pd.read_csv(
        "http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data",
        names=column)
    print(data)
    # Missing values are encoded as '?'; map them to NaN and drop those rows.
    data = data.replace(to_replace='?', value=np.nan)
    data = data.dropna()
    # Split features (columns 1-9) and target (column 10; class label 2 or 4).
    x_train, x_test, y_train, y_test = train_test_split(
        data[column[1:10]], data[column[10]], test_size=0.25)
    # Standardize features; the scaler is fit on the training set only to
    # avoid leaking test-set statistics.
    std = StandardScaler()
    x_train = std.fit_transform(x_train)
    # BUG FIX: the original wrote `x_test - std.transform(x_test)` (a bare
    # subtraction whose result was discarded), so the model was evaluated on
    # UNSCALED test data. Assign the transformed array instead.
    x_test = std.transform(x_test)
    # Fit logistic regression (C is the inverse of regularization strength).
    lg = LogisticRegression(C=1.0)
    lg.fit(x_train, y_train)  # minimizes the log loss to optimize the weights W
    print(lg.coef_)  # learned weight coefficients
    y_predict = lg.predict(x_test)
    print("准确率:", lg.score(x_test, y_test))
    # Per-class precision/recall: label 2 = benign (良性), 4 = malignant (恶性).
    print("召回率:", classification_report(y_test, y_predict, labels=[2, 4], target_names=["良性", "恶性"]))
    return None
if __name__ == "__main__":
    # Run the demo only when executed as a script (the original body line
    # was not indented, which is a syntax error).
    logis()