# Regression: linear regression, ridge regression, Lasso regression, logistic regression (commonly used as a classifier)
# Regression trees and boosted trees
# Gradient Boosting Decision Tree (GBDT)
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn.preprocessing import Normalizer
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.decomposition import PCA
def hr_preprocessing(sl=False,le=False,npr=False,amh=False,tsc=False,wa=False,pl5=False,dp=False,slr=False,lower_d=False,ld_n=1):
    """Load and preprocess HR.csv, returning (features, label).

    Each boolean flag picks the scaling for one column:
      sl/le/npr/amh/tsc/wa/pl5 -- numeric columns: False -> MinMaxScaler,
                                  True -> StandardScaler.
      slr/dp  -- "salary"/"department": False -> label-encode then min-max
                 normalize, True -> one-hot encode via pd.get_dummies.
      lower_d -- if True, reduce the feature matrix to ld_n components with
                 PCA and return an ndarray instead of a DataFrame.
      ld_n    -- number of components kept when lower_d is True.

    Returns:
        (DataFrame or ndarray, Series) -- features and the "left" label.
    """
    df = pd.read_csv("HR.csv")  # load the raw data
    # 1. Clean: drop rows missing key fields, then filter out dirty values
    #    with a single combined mask (chained df[c1][c2] indexing relies on
    #    index alignment and is fragile in pandas).
    df = df.dropna(subset=["satisfaction_level", "last_evaluation"])
    df = df[(df["satisfaction_level"] <= 1) & (df["salary"] != "nme")]
    # 2. Extract the label and remove it from the features
    label = df["left"]
    df = df.drop("left", axis=1)  # axis=1: drop a column
    # 3. Feature selection (none performed here)
    # 4. Feature processing: normalization / standardization of numeric columns
    numeric_flags = [sl, le, npr, amh, tsc, wa, pl5]
    numeric_cols = ["satisfaction_level", "last_evaluation", "number_project",
                    "average_monthly_hours", "time_spend_company", "Work_accident",
                    "promotion_last_5years"]
    for use_standard, col in zip(numeric_flags, numeric_cols):
        scaler = StandardScaler() if use_standard else MinMaxScaler()
        # reshape(-1,1): column vector for the scaler; reshape(1,-1)[0]: back to 1-D
        df[col] = scaler.fit_transform(df[col].values.reshape(-1, 1)).reshape(1, -1)[0]
    # Categorical columns: label-encode (+ min-max) or one-hot encode
    for use_onehot, col in zip([slr, dp], ["salary", "department"]):
        if use_onehot:
            df = pd.get_dummies(df, columns=[col])  # pandas one-hot encoding
        else:
            if col == "salary":
                # custom ordinal mapping (see map_salary) instead of LabelEncoder
                df[col] = [map_salary(s) for s in df["salary"].values]
            else:
                df[col] = LabelEncoder().fit_transform(df[col])
            # optional re-normalization of the encoded codes
            df[col] = MinMaxScaler().fit_transform(df[col].values.reshape(-1, 1)).reshape(1, -1)[0]
    if lower_d:
        # LDA's n_components is capped by the number of label classes, so the
        # unconstrained PCA is used instead:
        # return LinearDiscriminantAnalysis(n_components=ld_n)
        return PCA(n_components=ld_n).fit_transform(df.values), label
    return df, label
# LabelEncoder sorts categories alphabetically; to keep the ordinal meaning
# low=0, medium=1, high=2 the mapping is defined explicitly (dict literal
# instead of dict([...]) per flake8-comprehensions C406).
d = {"low": 0, "medium": 1, "high": 2}
def map_salary(s):
    """Map a salary label to its ordinal code; unknown labels default to 0 (low)."""
    return d.get(s, 0)
def regr_test(features, label):
    """Fit an ordinary linear regression of label on features and print
    the inputs, the learned coefficients, and the training-set MSE.

    features: DataFrame of predictor columns.
    label: Series holding the continuous target.
    """
    print("x", features)
    print("y", label)
    # Alternatives tried: Ridge(alpha=0.8) (ridge) and Lasso(alpha=0.001) (Lasso)
    from sklearn.linear_model import LinearRegression
    regr = LinearRegression()  # plain linear regression
    regr.fit(features.values, label.values)
    y_pred = regr.predict(features.values)
    print("Coef", regr.coef_)  # estimated model parameters
    from sklearn.metrics import mean_squared_error
    # mean_squared_error is documented as (y_true, y_pred); MSE is symmetric so
    # the value is unchanged, but use the documented argument order.
    print("MSE", mean_squared_error(label.values, y_pred))
def hr_modeling(features, label):
    """Train classifiers on a train/validation/test split and print metrics.

    Splits the data 60%/20%/20% (train/validation/test), fits each model on
    the training set, and prints accuracy, recall and F1 for every split.

    features: DataFrame of preprocessed feature columns.
    label: Series of binary class labels ("left").
    """
    from sklearn.model_selection import train_test_split
    f_v = features.values  # DataFrame -> plain ndarray of feature values
    l_v = label.values
    # Hold out 20% as the validation set...
    x_tt, x_validation, y_tt, y_validation = train_test_split(f_v, l_v, test_size=0.2)
    # ...then split the remaining 80% again: 0.25 * 0.8 = 20% test overall
    x_train, x_test, y_train, y_test = train_test_split(x_tt, y_tt, test_size=0.25)
    # Evaluation metrics
    from sklearn.metrics import accuracy_score, recall_score, f1_score
    from sklearn.linear_model import LogisticRegression
    from sklearn.ensemble import GradientBoostingClassifier
    models = [
        ("LogisticRegression", LogisticRegression()),  # logistic regression used as a classifier
        ("GBDT", GradientBoostingClassifier(max_depth=6, n_estimators=100)),  # boosted trees
    ]
    for clf_name, clf in models:
        clf.fit(x_train, y_train)
        # index 0 = training set, 1 = validation set, 2 = test set
        xy_lst = [(x_train, y_train), (x_validation, y_validation), (x_test, y_test)]
        for i, (x_part, y_part) in enumerate(xy_lst):
            y_pred = clf.predict(x_part)
            print(i)
            print(clf_name, "ACC:", accuracy_score(y_part, y_pred))
            print(clf_name, "REC:", recall_score(y_part, y_pred))
            print(clf_name, "F-score:", f1_score(y_part, y_pred))
def main():
    """Script entry point: preprocess the HR data, run the regression demo,
    then train and evaluate the classifiers."""
    data, target = hr_preprocessing()
    # Regression sanity check: predict last_evaluation from two other columns.
    regression_inputs = data[["number_project", "average_monthly_hours"]]
    regression_target = data["last_evaluation"]
    regr_test(regression_inputs, regression_target)
    hr_modeling(data, target)
if __name__ == "__main__":
    main()