Common machine-learning project code: imports, warning suppression, data analysis, data cleaning, feature engineering, model selection, visualization

1. Importing packages

1.1 Common basic packages

import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import time
import glob
import os
import warnings

1.2 More common packages

import joblib  # sklearn.externals.joblib was removed from scikit-learn; import the standalone joblib package

1.3 Task-specific packages

from pydub import AudioSegment
from scipy.io import wavfile
from python_speech_features import mfcc  # commonly used for music classification
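A minimal sketch of how these audio packages fit together (the wav path below is hypothetical):

rate, sig = wavfile.read('./datas/sample.wav')  # hypothetical file; returns (sample rate, signal array)
mfcc_feat = mfcc(sig, samplerate=rate)  # MFCC feature matrix, shape (num_frames, 13) by default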

2. Importing training packages

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer

# Fill missing values (sklearn.preprocessing.Imputer was removed; use SimpleImputer)
from sklearn.impute import SimpleImputer
# Binarize labels
from sklearn.preprocessing import label_binarize
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn.metrics import f1_score, precision_score, recall_score

2.1 Suppressing warnings

# Suppress all warnings
import warnings
warnings.filterwarnings('ignore')

2.2 Disabling scientific notation

### Display numbers in plain notation instead of scientific notation, for readability
np.set_printoptions(precision=3, suppress=True)
pd.set_option('display.float_format', lambda x: '%.2f' % x)

2.3 Setting fonts

mpl.rcParams['font.sans-serif'] = ['SimHei']  # a font that can render Chinese characters in plots
mpl.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with non-ASCII fonts

2.4 Row/column display options

# show all columns
pd.set_option('display.max_columns', None)
# show all rows
pd.set_option('display.max_rows', None)
# set the display width of each value to 100 characters (default 50)
pd.set_option('display.max_colwidth', 100)

3. Reading files

3.1 Common file reads

train_datas = pd.read_csv('./datas/happiness_train_complete.csv',sep=',',encoding='gb2312')
test_datas = pd.read_csv('./datas/happiness_test_complete.csv',sep=',',encoding='gb2312')
train_datas_Y = train_datas[['happiness']]
train_datas_X = train_datas.drop(['happiness'],axis=1)
all_datas = pd.concat(([train_datas_X,test_datas]),axis=0,ignore_index=True)
path = './iris.data'
names = ['sepal length', 'sepal width', 'petal length', 'petal width', 'cla']
df = pd.read_csv(path, header=None, names=names)


df = pd.read_csv('../data/LoanStats3a.csv', skiprows = 1, low_memory = False)

3.2 Reading preprocessed files

datas_x = pd.read_csv('./processs_datas/train_process_datas_x.csv')
datas_y = pd.read_csv('./processs_datas/train_datas_Y.csv')
data_test = pd.read_csv('./processs_datas/test_process_datas.csv')
data_id = data_test['id']
data_test.drop(['id'],axis=1,inplace=True)

3.3 Reading an index-to-label mapping

Read the mapping between index and label from a file and return it as a dict:

data = pd.read_csv(music_index_label_path, header=None, encoding='utf-8')
name_label_list = np.array(data).tolist()
index_label_dict = dict(map(lambda t: (t[1], t[0]), name_label_list))

3.4 Merging/concatenating files

df3 = pd.concat([df3_1, df3_2], axis=0)  # vertical (row-wise) concatenation; use axis=1 for horizontal

4. Data analysis and processing

4.1 Inspecting data

print(all_datas.head())
print(all_datas.tail())
df_log.info()
df_log.shape
df.describe()
df.describe().T
X = datas.iloc[:,0:2]  # first two columns
sales = sales.iloc[:,:13]  # first 13 columns
sales = sales.loc[:24]  # rows up to label 24 (loc is label-based and inclusive)
df.columns
df.dtypes
df.duplicated().sum()  # number of duplicated rows
print(all_datas.info(verbose=True, show_counts=True))  # show_counts replaced null_counts in newer pandas
all_datas['marital_1st'].value_counts(dropna=False)
df['cla'].value_counts()  # class counts (pass dropna=False to also count missing values)
# count missing values in every column
nan_num = all_datas.isnull().sum()
print(nan_num)
nan_num0_list = list(nan_num[nan_num==0].index)  # columns with no missing values
birth = all_datas['birth']
print(birth.value_counts())
# bar chart of the value counts of 'birth'
plt.bar(birth.value_counts().index,birth.value_counts().values,label='birth')
plt.legend()
plt.show()

4.2 Binning and encoding

def birth_split(x):
    if x<=1930:
        return 0
    elif x<=1940:
        return 1
    elif x<=1950:
        return 2
    elif x<=1960:
        return 3
    elif x<=1970:
        return 4
    elif x<=1980:
        return 5
    elif x<=1990:
        return 6
    else:
        return 7

all_datas['birth_s']=all_datas['birth'].map(birth_split)
all_datas.drop(['birth'],axis=1,inplace=True)
all_datas['has_edu_yer'] = all_datas['edu_yr'].map(lambda x:1 if x>0 else 0)
def parseRecord(record):
    result = []
    r = zip(names, record)
    for name,v in r:
        if name == 'cla':
            if v == 'Iris-setosa':
                result.append(1)
            elif v == 'Iris-versicolor':
                result.append(2)
            elif v == 'Iris-virginica':
                result.append(3)
            else:
                result.append(np.nan)
        else:
            result.append(float(v))
    return result
datas = df.apply(lambda r: pd.Series(parseRecord(r),index=names),axis=1)    
    
### Check whether income meets expectation
def if_exp_inc(x):
    income, inc_exp = x['income'], x['inc_exp']
    if income >= inc_exp:
        return 1
    else:
        return 0
all_datas['if_exp_inc'] = all_datas[['income','inc_exp']].apply(if_exp_inc,axis=1)
# use the 5th column as y, converted to integer codes
# (pd.Categorical(...).codes maps each distinct value to an integer, e.g. the three iris species -> 0, 1, 2)
y = pd.Categorical(data[4]).codes

4.3 Time-feature processing

def date_format(dt):
    # dt holds the date and time as two fields, e.g. ('28/12/2006', '17:31:00')
    t = time.strptime(' '.join(dt), '%d/%m/%Y %H:%M:%S')
    return (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
X = X.apply(lambda x: pd.Series(date_format(x)),axis = 1)

4.4 Other data processing

# ## Merge X and Y first, then drop incomplete rows together
datas = pd.concat([datas_x,datas_y],axis=1)
# # print(datas.head())
# # print(datas.info(verbose=True,null_counts=True))
datas.replace(-8,np.nan,inplace=True)
datas.dropna(axis=0,how='any',inplace=True)
# # print(datas.shape)
# # print(datas['happiness'].value_counts(dropna=True))

4.4.1 Filtering by value

data=data[(data['UnitPrice']>0)&(data['Quantity']>0)]

4.4.2 Creating a sub-dataset

# Overall-sales sub-dataset: order date, sales, quantity, profit, year, month
sales_data = df[['Order_Date','Sales','Quantity','Profit','year','month']]
data = data.sample(frac = 0.1, random_state=10)  # random 10% sample

4.4.3 Stripping the first character of a column

# '物料编码' is the material-code column; drop the first character of each code
a = df1['物料编码'].values
out1 = [str(i)[1:] for i in a.tolist()]
df1['物料编码'] = np.array(out1)
# equivalent vectorized form: df1['物料编码'] = df1['物料编码'].astype(str).str[1:]

4.5 groupby operations

sales_year = sales_data.groupby(['year','month']).sum()
# slice(None) is Python's full-slice object; here it selects every month within 2011
year_2011 = sales_year.loc[(2011,slice(None)),:].reset_index()
Market_Year_Sales = df.groupby(['Market', 'year']).agg({'Sales':'sum'}).reset_index().rename(columns={'Sales':'Sales_amounts'})  # sales by market and year
productId_amount = df.groupby('Product_ID').sum()['Sales'].sort_values(ascending=False)
print(productId_amount.head(10))  # top 10 products by sales

5. Outliers, missing values, and deletion

'''
Data-processing strategy:
    0. First drop records (or features?) with heavy missingness (rows without labels are useless for training).
    1. Extract components from time data with time.strptime().
    2. Bin continuous data into segments, i.e. convert it to discrete data.
    3. Vectorize text: one-hot, TF-IDF, word embeddings.
    4. Fill missing continuous values with 0, the mean, the mode, the median, or domain knowledge.
    5. Fill missing discrete values (treat the missing value as its own category, fill by label frequency, or use domain knowledge), then dummy-encode.
    After preprocessing, a correlation check can keep only one feature of each highly correlated pair
    (optional: it does not guarantee better accuracy, but it reduces dimensionality and training cost).
    PCA, standardization, and normalization are applied after the numeric features have been preprocessed.
'''
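Step 3 above (text vectorization) is not demonstrated elsewhere in this post; here is a minimal TF-IDF sketch using the TfidfVectorizer imported in section 2 (the 'comment' column is hypothetical):

tfidf = TfidfVectorizer(max_features=1000)  # keep the 1000 highest-scoring terms
text_features = tfidf.fit_transform(all_datas['comment'].fillna('').astype(str))  # sparse matrix (n_samples, 1000)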

5.1 replace, rename

df.int_rate.replace('%', '', regex = True, inplace = True)  # strip '%' from values like '10.65%' (regex=True enables substring replacement)
df.emp_length.replace('n/a', np.nan , inplace = True)
# replace invalid characters with np.nan
new_df = df.replace('?',np.nan)
## -1 = not applicable; -2 = don't know; -3 = refused to answer; -8 = unable to answer;
all_datas.replace([-1,-2,-3,-8],np.nan,inplace=True)
## alternatively, -8 can be mapped to 3 in the labels (a special case handled specially)
datas_y.replace(-8,3,inplace=True)
### replace these sentinel codes with NaN before counting missing values (same call as above)
df.term.replace(to_replace= '[^0-9]+', value = '', inplace = True, regex = True)  # e.g. ' 60 months' -> '60'
df.loan_status.replace('Fully Paid', 1, inplace = True)  # class 1
df.loan_status.replace('Charged Off', 0, inplace = True)  # class 0
df.loan_status.replace('Does not meet the credit policy. Status:Fully Paid', np.nan, inplace = True)
df.loan_status.replace('Does not meet the credit policy. Status:Charged Off', np.nan, inplace = True)
df.dropna(subset = ['loan_status'],inplace = True)
# keep only Chinese characters
df1['Name\n名称']=df1['Name\n名称'].str.replace(r'[^\u4e00-\u9fa5]','',regex=True)
df1['Name\n名称'].value_counts()
# rename a column
df1=df1.rename(columns={'物料号': '物料编码'})
df1.columns

5.2 drop

# drop id and member_id
df.drop('id', axis = 1, inplace = True)
df.drop('member_id', axis = 1, inplace = True)
df.drop(['sub_grade','emp_title'], axis = 1, inplace = True)
# drop columns/rows that are all NaN, all zero, or constant
df.dropna(axis = 1, how = 'all', inplace = True)  # drop all-NaN columns
df.dropna(axis = 0, how = 'all', inplace = True)  # drop all-NaN rows
df=df.loc[:, (df != 0).any(axis=0)]  # drop all-zero columns
df=df.loc[(df != 0).any(axis=1),:]  # drop all-zero rows
df=df.loc[ : , ~df.columns.str.contains("^Unnamed")]  # drop unnamed columns
df = df.loc[:,~((df==1).all()|(df==0).all())]  # drop columns that are entirely 0 or entirely 1 (df.ix was removed from pandas)

# count distinct values per column and drop near-constant columns
# float columns first
for col in df.select_dtypes(include = ['float']).columns:
    print('col {} has {}'.format(col, len(df[col].unique())))
# drop float columns whose distinct-value count is orders of magnitude below the row count
df.drop(['collections_12_mths_ex_med','open_acc','pub_rec',], axis = 1, inplace = True)
# inspect object columns with very few distinct values
for col in df.select_dtypes(include = ['object']).columns:
    print('col {} has {}'.format(col, len(df[col].unique())))
# drop the selected object columns
df.drop(['term','debt_settlement_flag'], axis = 1, inplace = True)
# drop columns with severe missingness: delete any column whose NaN count exceeds a threshold k
nan_numk_list = list(nan_num[nan_num>=9000].index)
all_datas = all_datas.drop(nan_numk_list,axis=1,inplace=False)

5.3 fillna

df.fillna(0, inplace = True)
df.fillna(0.0, inplace = True)
# simple approach: fill NaN as a new category of its own
all_datas.fillna(-1,inplace=True)
all_datas['income'] = all_datas['income'].fillna(np.mean(all_datas['income']))
# mean-fill inc_exp (note: the original fills it with the mean of family_income)
all_datas['inc_exp'] = all_datas.inc_exp.fillna(np.mean(all_datas.family_income))
all_datas['work_yr'] = all_datas.work_yr.fillna(np.mean(all_datas.work_yr))

public_list = ['public_service_1', 'public_service_2', 'public_service_3', 'public_service_4', 'public_service_5', 'public_service_6', 'public_service_7', 'public_service_8', 'public_service_9']
# mean-fill each public_service_* column
for column_name in public_list:
    all_datas[column_name] = all_datas[column_name].fillna(np.mean(all_datas[column_name]))
# use SimpleImputer to fill missing values (default strategy is the mean)
imputer = SimpleImputer(missing_values = np.nan)
X = imputer.fit_transform(X, Y)
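The median and mode fills mentioned in the strategy notes above map directly onto SimpleImputer's strategy parameter:

imputer_median = SimpleImputer(strategy='median')
imputer_mode = SimpleImputer(strategy='most_frequent')  # the mode; also works on string columns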

6. Feature engineering

6.1 Separating discrete and continuous features

list01 = [] ## columns treated as discrete
list02 = [] ## columns treated as continuous
### heuristic: a column with more than 40 distinct values is treated as continuous
for column in nan_num02k_list:
    if all_datas[column].value_counts(dropna=False).shape[0]<=40:
        list01.append(column)
    else:
        list02.append(column)
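A sketch of what typically follows this split, assuming the discrete columns (list01) are dummy-encoded and the continuous ones (list02) are left numeric:

all_datas = pd.get_dummies(all_datas, columns=list01)  # one indicator column per category value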

6.2 Correlation checks and dummy encoding

# find highly correlated column pairs (candidates for deletion)
cor = df.corr()  # pandas correlation matrix
cor.loc[:,:] = np.tril(cor, k=-1)  # keep only the lower triangle so each pair appears once
cor = cor.stack()  # reshape into a Series indexed by column pairs
print(cor[(cor>0.55) | (cor < -0.55)])
# dummy-encode categorical columns
df = pd.get_dummies(df)
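The block above only prints the highly correlated pairs; a minimal sketch of actually dropping one member of each pair (same 0.55 threshold):

high_corr = cor[(cor > 0.55) | (cor < -0.55)]
cols_to_drop = {col2 for col1, col2 in high_corr.index}  # drop one column from each correlated pair
df = df.drop(columns=list(cols_to_drop))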

6.3 Standardization and normalization

ss = StandardScaler()  # standardize to zero mean and unit variance

# fit on the training set, then apply the same parameters to the test set
X_train = ss.fit_transform(X_train)
X_test = ss.transform(X_test)
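MinMaxScaler (imported in section 2) follows the same fit/transform pattern, scaling each feature to [0, 1]:

mm = MinMaxScaler()
X_train_mm = mm.fit_transform(X_train)
X_test_mm = mm.transform(X_test)  # reuse the training-set min/max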

6.4 Dimensionality reduction

pca = PCA(n_components=2)
x_train = pca.fit_transform(x_train)
x_test = pca.transform(x_test)
pca.explained_variance_ratio_  # fraction of variance explained by each retained component
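A common way to choose n_components is the cumulative explained-variance ratio, e.g. keeping enough components to cover 95% of the variance (a sketch):

pca_full = PCA().fit(x_train)
n_keep = int(np.argmax(np.cumsum(pca_full.explained_variance_ratio_) >= 0.95)) + 1  # first index reaching 95%
pca = PCA(n_components=n_keep)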

7. Data splitting

7.1 concat, merge

all_datas = pd.concat(([train_datas_X,test_datas]),axis=0,ignore_index=True)
# merge on the 物料编码 (material code) key
df123=pd.merge(df23,df1,on="物料编码")
df123.shape
Y = df.loan_status  # loan status is the target
X = df.drop('loan_status', axis = 1, inplace = False)

## split the processed all_datas back into [training features + labels] and test features
train_process_datas_x = all_datas.iloc[:train_datas_X.shape[0],:]
test_process_datas = all_datas.iloc[train_datas_X.shape[0]:,:]
print(train_process_datas_x.shape)
print(test_process_datas.shape)
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.3, random_state = 0)
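For classification tasks, passing stratify keeps the class ratio identical in the train and test splits (a sketch):

x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=0, stratify=Y)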

7.2 Oversampling with SMOTE

from imblearn.over_sampling import SMOTE
# pip install -U imbalanced-learn
sample_solver = SMOTE(random_state=0)
X_sample ,Y_sample = sample_solver.fit_resample(x_train,y_train)  # resample only the training set (fit_sample was renamed fit_resample)
# X_sample ,Y_sample = sample_solver.fit_resample(X,Y)
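A quick sanity check that the classes are balanced after resampling:

print(pd.Series(Y_sample).value_counts())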

8. Model selection

8.1 Linear and logistic regression

# Model training (model is assumed here to be a plain linear regressor)
model = LinearRegression()
model.fit(x_train,y_train)
# Model evaluation
print('Train score:',model.score(x_train,y_train))
print('Test score:',model.score(x_test,y_test))
y_train_hat = model.predict(x_train)
y_test_hat = model.predict(x_test)
print('Train MSE:',mean_squared_error(y_train,y_train_hat))
print('Test MSE:',mean_squared_error(y_test,y_test_hat))
lr = LogisticRegression()
start= time.time()
lr.fit(x_train, y_train)
train_predict = lr.predict(x_train)
# sklearn metrics expect (y_true, y_pred) in that order
train_f1 = metrics.f1_score(y_train, train_predict)
train_acc = metrics.accuracy_score(y_train, train_predict)
train_rec = metrics.recall_score(y_train, train_predict)
print("Logistic regression results:")
print("train f1: %.4f" % train_f1, end=' ')
print("train accuracy: %.4f" % train_acc, end=' ')
print("train recall: %.4f" % train_rec)
test_predict = lr.predict(x_test)
test_f1 = metrics.f1_score(y_test, test_predict)
test_acc = metrics.accuracy_score(y_test, test_predict)
test_rec = metrics.recall_score(y_test, test_predict)
print("test f1: %.4f" % test_f1, end = ' ')
print("test accuracy: %.4f" % test_acc, end=' ')
print("test recall: %.4f" % test_rec)
end = time.time()
print(end-start)
# coefficients and intercept
lr.coef_
lr.intercept_

8.2 Random forest

forest = RandomForestClassifier(n_estimators=100, criterion='gini',max_depth=2,random_state=0)
forest.fit(x_train,y_train)
print("Random forest results" + "=" * 30)
rf = RandomForestClassifier()
start = time.time()
rf.fit(x_train, y_train)
train_predict = rf.predict(x_train)
train_f1 = metrics.f1_score(y_train, train_predict)
train_acc = metrics.accuracy_score(y_train, train_predict)
train_rec = metrics.recall_score(y_train, train_predict)
print("train f1: %.4f" % train_f1, end=' ')
print("train accuracy: %.4f" % train_acc, end=' ')
print("train recall: %.4f" % train_rec)
test_predict = rf.predict(x_test)
test_f1 = metrics.f1_score(y_test, test_predict)
test_acc = metrics.accuracy_score(y_test, test_predict)
test_rec = metrics.recall_score(y_test, test_predict)
print("test f1: %.4f" % test_f1, end = ' ')
print("test accuracy: %.4f" % test_acc, end=' ')
print("test recall: %.4f" % test_rec)
end = time.time()
print(end - start)
## number of trees
estimators = [1,50,100,500]
# tree depth
depth = [1,2,3,7,15]
# error rates
err_list = []

for es in estimators:
    es_list = []
    for d in depth:
        tf = RandomForestClassifier(n_estimators=es,criterion='gini',max_depth=d, random_state=0)
        tf.fit(x_train2,y_train2)
        # accuracy on the hold-out set
        st = tf.score(x_test2,y_test2)
        err = 1 - st
        es_list.append(err)
        print('%d trees, max depth %d, accuracy: %.2f%%' % (es,d,st*100))

    err_list.append(es_list)

8.2.2 Pipeline + PCA + RF

## grid-search cross-validation with a random-forest classifier (a regressor scored by MSE also works)
'''
RandomForestClassifier default parameters:
n_estimators=10,  number of trees
criterion="gini",
max_depth=None,  tree depth
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None  class weights
'''
pipe = Pipeline([('pca',PCA()),
                 ('RF',RandomForestClassifier())])

params = {
    'pca__n_components':[0.6,0.7,0.8,0.9],
    'RF__n_estimators':[100,200,500],
    'RF__max_depth':[3,5,7,9],
    'RF__class_weight':['balanced','balanced_subsample',None]
}
from sklearn.model_selection import GridSearchCV
model = GridSearchCV(estimator=pipe,param_grid=params,cv=5)
model.fit(datas_x,datas_y)
print('Best params: {}'.format(model.best_params_))
print('Best score: {}'.format(model.best_score_))
## save the best params and score
with open('./model/pca_rf_params.txt','w',encoding='utf-8') as writer:
    writer.write('Best params: {}'.format(model.best_params_)+'\n'+'Best score: {}'.format(model.best_score_))

model = Pipeline([('pca',PCA(n_components=0.6)),
                 ('RF',RandomForestClassifier(n_estimators=100,max_depth=3))])

8.3 GBDT

print("GBDT上效果如下" + "=" * 30)
gb = GradientBoostingClassifier()
start = time.time()
gb.fit(x_train, y_train)
train_predict = gb.predict(x_train)
train_f1 = metrics.f1_score(train_predict, y_train)
train_acc = metrics.accuracy_score(train_predict, y_train)
train_rec = metrics.recall_score(train_predict, y_train)
print("在训练集上f1_mean的值为%.4f" % train_f1, end=' ')
print("在训练集上的精确率的值为%.4f" % train_acc, end=' ')
print("在训练集上的查全率的值为%.4f" % train_rec)
test_predict = gb.predict(x_test)
test_f1 = metrics.f1_score(test_predict, y_test)
test_acc = metrics.accuracy_score(test_predict, y_test)
test_rec = metrics.recall_score(test_predict, y_test)
print("在测试集上f1_mean的值为%.4f" % test_f1, end = ' ')
print("在训练集上的精确率的值为%.4f" % test_acc, end=' ')
print("在训练集上的查全率的值为%.4f" % test_rec)
end = time.time()
print(end-start)

8.4 SVM

start = time.time()
parameters ={
    'kernel':['linear','sigmoid','poly'],
    'C':[0.01, 1],
    'probability':[True, False]
    }
clf = GridSearchCV(SVC(random_state = 0), param_grid = parameters, cv = 5)
clf.fit(x_train, y_train)
print('Best params:',end=' ')
print(clf.best_params_)
print('Best CV accuracy:', end = ' ')
print(clf.best_score_)
end = time.time()
print(end-start)

8.5 KNN

print('Training KNN model:')
knn = KNeighborsClassifier(n_neighbors=5)
model = knn.fit(data, y_train)  # data / data_test are the preprocessed train/test feature matrices
y_predict = model.predict(data_test)
precision = precision_score(y_test, y_predict)
recall = recall_score(y_test, y_predict)
f1mean = f1_score(y_test, y_predict)
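classification_report bundles per-class precision, recall, and F1 into one call (a sketch):

from sklearn.metrics import classification_report
print(classification_report(y_test, y_predict))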

8.6 XGBoost

8.6.1 Classification

# xgboost
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
kf = KFold(n_splits=5, random_state=42,shuffle=True)
parameters ={'max_depth':[3,5,7],'min_child_weight':list(range(1,6,2))}
print('2. Xgboost grid-search cross-validation:')
start= time.time()
xgb1=xgb.XGBClassifier(learning_rate=0.1, n_estimators=100,
                 objective='binary:logistic',
                 gamma=0,subsample=0.8,colsample_bytree=0.8,
                nthread=4,scale_pos_weight=1,seed=27)
grid_xgb=GridSearchCV(estimator=xgb1,param_grid=parameters,cv=kf)
print("Training...")
grid_xgb.fit(x_train,y_train)
print("Best params:",grid_xgb.best_params_)
# grid_xgb.best_params_
# {'max_depth': 4, 'min_child_weight': 1}
# predict with the best model
train_pred = grid_xgb.predict(x_train)
print('Train accuracy:',np.around(accuracy_score(y_train,train_pred),4))
print("Testing...")
best_xgb=grid_xgb.best_estimator_
predict_xgb=best_xgb.predict(x_test)
# For regression, objective is usually reg:squarederror (MSE); binary classification usually uses binary:logistic; multi-class uses multi:softmax.
print('Test accuracy:',np.around(accuracy_score(y_test,predict_xgb),4))

Sample output:

2. Xgboost grid-search cross-validation:
Training...
Best params: {'max_depth': 7, 'min_child_weight': 1}
Train accuracy: 0.6841
Testing...
Test accuracy: 0.6811


8.6.2 Multi-class classification

print('5. Training Xgboost model:')
start= time.time()
clf = XGBClassifier(
    n_estimators=100,  # number of boosting rounds
    learning_rate=0.1,  # step size
    max_depth=5,  # max tree depth
    min_child_weight=1,  # minimum sum of instance weights required in a child
    subsample=0.8,  # fraction of samples used per tree
    colsample_bytree=0.8,  # fraction of features sampled per tree (typical: 0.5-1)
    objective='multi:softmax',  # multi-class objective
    num_class=10,  # number of classes (age groups 1-10)
    nthread=4,
    seed=27)
print("Training...")
clf.fit(x_train, y_train, verbose=True)
train_pred = clf.predict(x_train)
print('Train accuracy:',np.around(accuracy_score(y_train,train_pred),4))
print("Testing...")
test_pred = clf.predict(x_test)
print('Test accuracy:',np.around(accuracy_score(y_test,test_pred),4))
end = time.time()
print("Xgboost took %.2f s"%(end-start))

8.6.3 Regression

# xgboost
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
kf = KFold(n_splits=5, random_state=42,shuffle=True)
parameters ={'max_depth':[4,5,6],'min_child_weight':list(range(1,6,2))}
xgb1=xgb.XGBRegressor(learning_rate=0.1, n_estimators=500,
                 objective='reg:squarederror',
                 gamma=0,subsample=0.8,colsample_bytree=0.8,
                nthread=4,scale_pos_weight=1,seed=27)
grid_xgb=GridSearchCV(estimator=xgb1,param_grid=parameters,cv=kf,scoring='neg_mean_squared_error')
grid_xgb.fit(X_train,train_datas_Y)
grid_xgb.best_params_
# {'max_depth': 4, 'min_child_weight': 1}
RMSE=np.sqrt(-grid_xgb.best_score_)
# 0.1652553857634199
# predict with the best model
best_xgb=grid_xgb.best_estimator_
predict_xgb=best_xgb.predict(X_test)
# For regression, objective is usually reg:squarederror (MSE); binary classification usually uses binary:logistic; multi-class uses multi:softmax.

8.7 Naive Bayes

8.7.1 Gaussian naive Bayes (normal distribution)

from sklearn.naive_bayes import GaussianNB
print('6. Training Gaussian naive Bayes:')
GNB = GaussianNB()
start= time.time()
GNB.fit(x_train, y_train)
train_predict = GNB.predict(x_train)
GNB_train_accuracy = accuracy_score(y_train,train_predict)
print('Train accuracy:',np.around(GNB_train_accuracy,4))
print('Testing:')
test_predict = GNB.predict(x_test)
GNB_test_accuracy = accuracy_score(y_test,test_predict)
print('Test accuracy:',np.around(GNB_test_accuracy,4))
end = time.time()
print("Took %.2f s"%(end-start))
print('*'*40)

8.7.2 Multinomial naive Bayes

from sklearn.naive_bayes import MultinomialNB
print('7. Training multinomial naive Bayes:')
start= time.time()
# clf = Pipeline([
#     ('poly',PolynomialFeatures(degree=4)),
#     ('clf', MultinomialNB())  # feature values must be non-negative
# ])
MNB = MultinomialNB()
MNB.fit(x_train,y_train)
# train accuracy
y_train_hat = MNB.predict(x_train)
print('Train accuracy:',np.around(accuracy_score(y_train,y_train_hat),4))
# test accuracy
y_test_hat = MNB.predict(x_test)
print('Test accuracy:',np.around(accuracy_score(y_test,y_test_hat),4))
end = time.time()
print("Took %.2f s"%(end-start))

8.8 SVM

8.8.1 SVC

print('8. Training SVM model:')
svc = SVC(C=1,kernel='linear', decision_function_shape='ovr')  # avoid naming the instance 'svm', which shadows the module
start= time.time()
svc.fit(x_train, y_train)
train_predict = svc.predict(x_train)
svm_train_accuracy = accuracy_score(y_train,train_predict)
print('Train accuracy:',np.around(svm_train_accuracy,4))
print('Testing:')
test_predict = svc.predict(x_test)
svm_test_accuracy = accuracy_score(y_test,test_predict)
print('Test accuracy:',np.around(svm_test_accuracy,4))
end = time.time()
print("Took %.2f s"%(end-start))
print('*'*40)

9. Model persistence, loading, and deployment

9.1 Saving and loading a model

# Model persistence
# for the final saved model, retrain on all available data before saving
model.fit(datas_x,datas_y)
print(model.score(datas_x,datas_y))
datas_y_hat = model.predict(datas_x)
print(mean_squared_error(datas_y,datas_y_hat))
joblib.dump(model,'./model/pca_rf.pkl')
## Load the trained model back
rf = joblib.load('./model/pca_rf.pkl')

10. Writing files

10.1 Merging text files

# Step 2: merge the per-part files (suffixes 000-215) back into one file
with open('./data/result_process01', "w", encoding='utf-8') as writer:
    for l1 in ['%03d' % i for i in range(216)]:  # reconstructs the garbled 'list0-215'; suffixes '000'..'215'
        file_path= './data/process01_' + l1
        print("Merging file: " + file_path)

        with open(file_path, encoding = 'utf-8') as file:
            for line in file:
                writer.writelines(line)

10.2 Saving as CSV

### save the processed data as CSV
dir_path = './processs_datas'
import os
os.makedirs(dir_path, exist_ok=True)  # create the directory if it does not exist
train_process_datas_x.to_csv(dir_path+'/train_process_datas_x.csv',index=False)
test_process_datas.to_csv(dir_path+'/test_process_datas.csv',index=False)
train_datas_Y.to_csv(dir_path+'/train_datas_Y.csv',index=False)
print(train_datas_Y['happiness'].value_counts())
## when dropping rows with missing Y, drop the matching rows of X as well

10.3 Submission files

test_y_hat = rf.predict(data_test)
submit = pd.concat([pd.DataFrame(data_id,columns=['id']),pd.DataFrame(test_y_hat,columns=['happiness'])],axis=1)
submit.to_csv('./predict/rf_submit.csv',index=False)
submit = pd.concat([pd.DataFrame(x_test.user_id,columns=['user_id']),
                    pd.DataFrame(xgb_age_predict,columns=['predicted_age']),
                    pd.DataFrame(xgb_gender_predict,columns=['predicted_gender'])],axis=1)

11. Visualization

11.1 Line plot: true vs. predicted values for linear regression

# sample index as the x-axis
t = np.arange(len(X_test))
# plot
plt.figure(facecolor='w')  # white figure background
plt.plot(t,Y_test,'r-',linewidth=2,label='true')
plt.plot(t,y_predict,'g-',linewidth=2,label='predicted')
plt.legend(loc='upper left')
plt.title('Linear regression: time vs. power',fontsize=20)
plt.grid(True)
plt.show()


purchase_r = pivoted_counts.applymap(lambda x: 1 if x > 1 else np.nan if x == 0 else 0)  # repurchase flag per user/period
(purchase_r.sum()/purchase_r.count()).plot(figsize=(10,4))


11.2 Iris KNN

x_test_len = range(len(X_test))
plt.figure(figsize=(12,9), facecolor='w')
plt.ylim(0.5,3.5)
# plot true values
plt.plot(x_test_len, Y_test, 'ro', markersize=6, zorder = 3, label=u'true')
# plot predicted values
plt.plot(x_test_len, knn_y_predict, 'yo', markersize=16, zorder = 1, label = u'predicted')
plt.legend(loc='upper right')
plt.xlabel(u'sample index',fontsize=18)
plt.ylabel(u'species',fontsize=18)
plt.title(u'Iris classification',fontsize=20)
plt.show()


11.3 Decision-tree accuracy for different feature pairs

# compare feature pairs
feature_pairs = [[0,1],[0,2],[0,3],[1,2],[1,3],[2,3]]
plt.figure(figsize=(9,6), facecolor = 'w')

for i,pair in enumerate(feature_pairs):
    # select the feature pair
    x = x_prime[pair]

    # fit a tree
    clf = DecisionTreeClassifier(criterion='gini', max_depth=5)  # Gini impurity as the split criterion
    clf.fit(x, y)

    # predictions
    y_hat = clf.predict(x)

    # report fit quality
    score = clf.score(x,y)
    c = np.count_nonzero(y_hat == y)  # number of correct predictions
    print('Features: ', iris_feature[pair[0]], ' + ', iris_feature[pair[1]])
    print('\tcorrect predictions:',c)
    print('\taccuracy: %.2f%%' % (score*100))

    # decision regions
    N, M = 500, 500
    x1_min, x2_min = x.min()
    x1_max, x2_max = x.max()
    t1 = np.linspace(x1_min, x1_max, N)
    t2 = np.linspace(x2_min, x2_max, M)
    x1, x2 = np.meshgrid(t1,t2)  # grid of sample points
    x_test = np.dstack((x1.flat, x2.flat))[0]  # stack and flatten to an (N*M, 2) array

    # color maps
    cm_light = mpl.colors.ListedColormap(['#A0FFA0','#FFA0A0','#A0A0FF'])
    cm_dark = mpl.colors.ListedColormap(['g','r','b'])

    # predict on the grid
    y_hat = clf.predict(x_test)
    y_hat = y_hat.reshape(x1.shape)

    # draw
    plt.subplot(2,3,i+1)
    plt.pcolormesh(x1,x2,y_hat,cmap=cm_light)  # predicted regions
    plt.scatter(x[pair[0]],x[pair[1]],c=y, edgecolors='k', cmap=cm_dark)  # sample scatter
    plt.xlabel(iris_feature[pair[0]],fontsize=10)
    plt.ylabel(iris_feature[pair[1]],fontsize=10)
    plt.xlim(x1_min,x1_max)
    plt.ylim(x2_min,x2_max)
    plt.grid()
    plt.title(u'accuracy: %.2f%%' % (score*100), fontdict={'fontsize':15})

plt.suptitle(u'Effect of different feature pairs on the target',fontsize=18)
plt.tight_layout(pad=2)
plt.subplots_adjust(top=0.92)
plt.show()


11.4 ROC, AUC

forest_fpr1, forest_tpr1, _ = metrics.roc_curve(label_binarize(y_test[names[-4]],classes=(0,1,2)).T[0:-1].T.ravel(), forest_y_score[0].ravel())
forest_fpr2, forest_tpr2, _ = metrics.roc_curve(label_binarize(y_test[names[-3]],classes=(0,1,2)).T[0:-1].T.ravel(), forest_y_score[1].ravel())
forest_fpr3, forest_tpr3, _ = metrics.roc_curve(label_binarize(y_test[names[-2]],classes=(0,1,2)).T[0:-1].T.ravel(), forest_y_score[2].ravel())
forest_fpr4, forest_tpr4, _ = metrics.roc_curve(label_binarize(y_test[names[-1]],classes=(0,1,2)).T[0:-1].T.ravel(), forest_y_score[3].ravel())
auc1 = metrics.auc(forest_fpr1,forest_tpr1)
auc2 = metrics.auc(forest_fpr2,forest_tpr2)
auc3 = metrics.auc(forest_fpr3,forest_tpr3)
auc4 = metrics.auc(forest_fpr4,forest_tpr4)
plt.figure(figsize=(8,6), facecolor='w')
plt.plot(forest_fpr1,forest_tpr1,c='r',lw=2,label=u'Hinselmann target, AUC=%.3f' % auc1)
plt.plot(forest_fpr2,forest_tpr2,c='b',lw=2,label=u'Schiller target, AUC=%.3f' % auc2)
plt.plot(forest_fpr3,forest_tpr3,c='g',lw=2,label=u'Citology target, AUC=%.3f' % auc3)
plt.plot(forest_fpr4,forest_tpr4,c='y',lw=2,label=u'Biopsy target, AUC=%.3f' % auc4)
# random-guess baseline
plt.plot((0,1),(0,1),c='#a0a0a0',lw=2,ls='--')
plt.xlim(-0.001, 1.001)
plt.ylim(-0.001, 1.001)
# ticks
plt.xticks(np.arange(0,1.1,0.1))
plt.yticks(np.arange(0,1.1,0.1))
# axis labels
plt.xlabel('False Positive Rate(FPR)', fontsize=16)
plt.ylabel('True Positive Rate(TPR)', fontsize=16)
plt.grid(visible=True,ls=':')  # grid's 'b' argument was renamed 'visible' in newer matplotlib
plt.legend(loc='lower right', fancybox=True, framealpha=0.8, fontsize=12)
plt.title(u'Random forest ROC curves for multiple targets', fontsize=18)
plt.show()

11.5 RF error curves

plt.figure(facecolor='w')
i = 0
colors = ['r','b','g','y']
lw = [1,2,4,3]
max_err = 0
min_err = 100
# plot each curve and track the overall min/max error
for es,l in zip(estimators,err_list):
    plt.plot(depth, l, c = colors[i], lw=lw[i],label = u'trees: %d' % es)
    max_err = max((max(l),max_err))
    min_err = min((min(l),min_err))
    i+=1

# labels
plt.xlabel(u'tree depth', fontsize=16)
plt.ylabel(u'error rate', fontsize=16)
plt.grid(visible=True)
plt.legend(loc='upper left', fancybox=True, framealpha=0.8, fontsize=12)
plt.xlim(min(depth),max(depth))
plt.ylim(min_err * 0.99, max_err * 1.01)
plt.title(u'Random forest: tree count, depth vs. error rate', fontsize=18)
plt.show()


11.6 Background-gradient table

# rename the columns and the index
sales.columns=['Sales-2011','Sales-2012','Sales-2013','Sales-2014']
sales.index=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']

# darker color = higher sales
sales.style.background_gradient()


11.7 Stacked area chart

# area chart (stacked=False overlays the series; set stacked=True to stack them)
sales.plot.area(stacked=False)


purchase_stats_ct = purchase_stats.replace('unreg',np.nan).apply(pd.Series.value_counts)
# fill NaN with 0
purchase_stats_ct.fillna(0).T.plot.area()


11.8 Bar chart

import seaborn as sns
sns.barplot(x='Market', y='Sales_amounts', hue='year', data = Market_Year_Sales)
plt.title('2011-2014 market sales')


11.9 Pie chart

df["Segment"].value_counts().plot(kind='pie', autopct='%.2f%%', shadow=True, figsize=(14, 6))

11.10 Scatter plot

# '重要价值客户' = high-value customers: color them green, everyone else red
rfm.loc[rfm.label=='重要价值客户','color']='g'
rfm.loc[~(rfm.label=='重要价值客户'),'color']='r'
rfm.plot.scatter('F','R',c= rfm.color)


Comments welcome; let's learn, share notes, and compete together!