机器学习工程篇

1、数据集划分

# -*- coding: utf-8 -*-
## Dataset splitting
# train_test_split: simple hold-out split, with and without stratification.
from sklearn.model_selection import train_test_split

# Row i is [10*i+1, 10*i+2, 10*i+3, 10*i+4] -- identical to spelling the
# 8x4 matrix out by hand.
X = [[10 * i + k for k in (1, 2, 3, 4)] for i in range(8)]
y = [1, 1, 0, 0] * 2

# Plain split: 60% train / 40% test, seeded for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
print("X_train=", X_train)
print("X_test=", X_test)
print("y_train=", y_train)
print("y_test=", y_test)

# Stratified split: the class ratio of y is preserved in both halves.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.4, random_state=0, stratify=y)
print("Stratify:X_train=", X_train)
print("Stratify:X_test=", X_test)
print("Stratify:y_train=", y_train)
print("Stratify:y_test=", y_test)

# KFold: k-fold cross-validation index generation.
import numpy as np
from sklearn.model_selection import KFold

# 9 samples; row i is [10*i+1 .. 10*i+4].
X = np.array([[10 * i + k for k in (1, 2, 3, 4)] for i in range(9)])
y = np.array([1, 1, 0, 0, 1, 1, 0, 0, 1])

# FIX: passing random_state together with shuffle=False raises ValueError
# in scikit-learn >= 0.24 (the seed would have no effect); only pass a
# seed when shuffling.
folder = KFold(n_splits=3, shuffle=False)
for train_index, test_index in folder.split(X, y):
    print("Train Index:", train_index)
    print("Test Index:", test_index)
    print("X_train:", X[train_index])
    print("X_test:", X[test_index])
    print("")

# With shuffle=True the seed is meaningful and makes folds reproducible.
shuffle_folder = KFold(n_splits=3, random_state=0, shuffle=True)
for train_index, test_index in shuffle_folder.split(X, y):
    print("Shuffled Train Index:", train_index)
    print("Shuffled Test Index:", test_index)
    print("Shuffled X_train:", X[train_index])
    print("Shuffled X_test:", X[test_index])
    print("")

# StratifiedKFold vs plain KFold: stratified folds keep the class ratio
# of y in every fold, plain folds do not.
from sklearn.model_selection import KFold, StratifiedKFold
import numpy as np

X = np.array([[10 * i + k for k in (1, 2, 3, 4)] for i in range(8)])
y = np.array([1, 1, 0, 0, 1, 1, 0, 0])

# FIX: random_state must be omitted when shuffle=False, otherwise
# scikit-learn >= 0.24 raises ValueError (the seed would be unused).
folder = KFold(n_splits=4, shuffle=False)
stratified_folder = StratifiedKFold(n_splits=4, shuffle=False)

for train_index, test_index in folder.split(X, y):
    print("Train Index:", train_index)
    print("Test Index:", test_index)
    print("y_train:", y[train_index])
    print("y_test:", y[test_index])
    print("")

for train_index, test_index in stratified_folder.split(X, y):
    print("Stratified Train Index:", train_index)
    print("Stratified Test Index:", test_index)
    print("Stratified y_train:", y[train_index])
    print("Stratified y_test:", y[test_index])
    print("")

# LeaveOneOut: every sample is the test set exactly once (n splits).
from sklearn.model_selection import LeaveOneOut
import numpy as np

X = np.array([[10 * i + k for k in (1, 2, 3, 4)] for i in range(4)])
y = np.array([1, 1, 0, 0])

loo = LeaveOneOut()
for train_index, test_index in loo.split(X):
    print("Train Index:", train_index)
    print("Test Index:", test_index)
    print("X_train:", X[train_index])
    print("X_test:", X[test_index])
    print("")

# cross_val_score: 10-fold cross-validation of a linear SVM on the
# digits dataset; returns one accuracy score per fold.
from sklearn.model_selection import cross_val_score
from sklearn.datasets import load_digits
from sklearn.svm import LinearSVC

digits = load_digits()
X, y = digits.data, digits.target
result = cross_val_score(LinearSVC(), X, y, cv=10)
print("Cross Val Score is:", result)


def read_dataset():
    """Load all images under `path`, one sub-folder per class.

    Returns:
        (img_list, label_list): images scaled to [0, 1] floats and, for
        each image, the integer index of the sub-folder it came from.
    """
    # FIX: `os` and `cv2` were used but never imported, so the original
    # function crashed with NameError on the first call.  cv2 (OpenCV) is
    # an existing dependency of this code, not a new one.
    import os
    import cv2

    img_list = []
    label_list = []
    dir_counter = 0

    path = 'C:/Users/XINH/Desktop/train'

    # Read every image file from every class sub-folder.
    for child_dir in os.listdir(path):
        child_path = os.path.join(path, child_dir)
        for dir_image in os.listdir(child_path):
            img = cv2.imread(os.path.join(child_path, dir_image))
            if img is None:
                # FIX: cv2.imread returns None for unreadable or
                # non-image files; skip them instead of crashing on
                # `None / 255.0`.
                continue
            img = img / 255.0
            img_list.append(img)
            label_list.append(dir_counter)

        dir_counter += 1
    return img_list, label_list
img_list,label_list=read_dataset()
# Split into train / validation / test at a 7:1:2 ratio:
# first carve off 20% as the test set, then take 12.5% of the remaining
# 80% (= 10% of the whole) as the validation set.
X_train,X_test,y_train,y_test=train_test_split(img_list,label_list,test_size=0.2,random_state=0,stratify=label_list)
X_train,X_valid,y_train,y_valid=train_test_split(X_train,y_train,test_size=0.125,random_state=0,stratify=y_train)

2、数据标准化

# -*- coding: utf-8 -*-
## Binarization: values above the threshold become 1, the rest become 0.
from sklearn.preprocessing import Binarizer

X = [[1, 2, 3, 4, 5],
     [5, 4, 3, 3, 1],
     [3, 3, 3, 3, 3],
     [1, 1, 1, 1, 1]]
print("before transform:", X)
# Threshold 2.5 maps {1, 2} -> 0 and {3, 4, 5} -> 1.
binarizer = Binarizer(threshold=2.5)
print("after transform:", binarizer.transform(X))

## One-hot encoding
from sklearn.preprocessing import OneHotEncoder

X = [[1, 2, 3, 4, 5],
     [5, 4, 3, 2, 1],
     [3, 3, 3, 3, 3],
     [1, 1, 1, 1, 1]]
print("before transform:", X)
# FIX: the `sparse` argument was renamed to `sparse_output` (the old name
# was removed in scikit-learn 1.4), and the active_features_ /
# feature_indices_ / n_values_ attributes were removed in 0.23.
# `categories_` is the supported way to inspect the values learned for
# each feature.
encoder = OneHotEncoder(sparse_output=False)
encoder.fit(X)
print("categories_:", encoder.categories_)
print("after transform:", encoder.transform([[1, 2, 3, 4, 5]]))

## Standardization
# MinMaxScaler: rescale each feature column into the range [0, 2].
from sklearn.preprocessing import MinMaxScaler

X = [[1, 5, 1, 2, 10],
     [2, 6, 3, 2, 7],
     [3, 7, 5, 6, 4],
     [4, 8, 7, 8, 1]]
print("before transform:", X)
scaler = MinMaxScaler(feature_range=(0, 2))
scaler.fit(X)
# Dump the fitted parameters (labels kept byte-for-byte, typos included).
for label, value in [("min_is:", scaler.min_),
                     ("scale_is:", scaler.scale_),
                     ("data_max_is:", scaler.data_max_),
                     ("data_min_ is:", scaler.data_min_),
                     ("dta_range_ is:", scaler.data_range_)]:
    print(label, value)
print("after transform:", scaler.transform(X))
# MaxAbsScaler: divide each feature by its maximum absolute value,
# mapping every column into [-1, 1].
from sklearn.preprocessing import MaxAbsScaler

X = [[1, 5, 1, 2, 10],
     [2, 6, 3, 2, 7],
     [3, 7, 5, 6, 4],
     [4, 8, 7, 8, 1]]
print("before transform:", X)
scaler = MaxAbsScaler()
scaler.fit(X)
print("scale_ is:", scaler.scale_)
print("max_abs_ is:", scaler.max_abs_)
# FIX: corrected the misspelled output label ("afrer tansform").
print("after transform:", scaler.transform(X))
# StandardScaler: zero mean / unit variance per feature column.
from sklearn.preprocessing import StandardScaler

X = [[1, 5, 1, 2, 10],
     [2, 6, 3, 2, 7],
     [3, 7, 5, 6, 4],
     [4, 8, 7, 8, 1]]
print("before transform:", X)
scaler = StandardScaler()
scaler.fit(X)
# FIX: corrected the misspelled/misspaced output labels ("scler_ is :",
# "mean_is :").
print("scale_ is:", scaler.scale_)
print("mean_ is:", scaler.mean_)
print("var_ is:", scaler.var_)
print("after transform:", scaler.transform(X))
## Normalization: scale each sample (row) to unit L2 norm.
from sklearn.preprocessing import Normalizer

X = [[1, 2, 3, 4, 5],
     [5, 4, 3, 2, 1],
     [1, 3, 5, 2, 4],
     [2, 4, 1, 3, 5]]
print("before transform:", X)
l2_normalizer = Normalizer(norm='l2')
print("after transform:", l2_normalizer.transform(X))

3、特征提取

# -*- coding: utf-8 -*-
## Filter-style feature selection: drop features whose variance <= 1
# (here, the nearly-constant first column).
from sklearn.feature_selection import VarianceThreshold

X = [[100, 1, 2, 3],
     [100, 4, 5, 6],
     [100, 7, 8, 9],
     [101, 11, 12, 13]]
selector = VarianceThreshold(1)
selector.fit(X)
print("Variances is %s" % selector.variances_)
print("After transform is %s" % selector.transform(X))
# FIX: corrected the misspelled output labels ("surport", "rever").
print("The support is %s" % selector.get_support(True))
print("After reverse transform is %s" %
      selector.inverse_transform(selector.transform(X)))

# Univariate feature selection: keep the 3 features with the best
# ANOVA F-scores.
from sklearn.feature_selection import SelectKBest, f_classif

X = [[1, 2, 3, 4, 5],
     [5, 4, 3, 2, 1],
     [3, 3, 3, 3, 3],
     [1, 1, 1, 1, 1]]
y = [0, 1, 0, 1]
print("before transform:", X)
selector = SelectKBest(score_func=f_classif, k=3).fit(X, y)
print("score_:", selector.scores_)
print("pvalues_:", selector.pvalues_)
print("selected index:", selector.get_support(True))
print("after transform:", selector.transform(X))

## Wrapper-style feature selection: recursive feature elimination (RFE)
# around a linear SVM, keeping 2 of the 4 iris features.
from sklearn.feature_selection import RFE
from sklearn.svm import LinearSVC
from sklearn.datasets import load_iris

iris = load_iris()
X, y = iris.data, iris.target
selector = RFE(estimator=LinearSVC(), n_features_to_select=2)
selector.fit(X, y)
print("N_features %s" % selector.n_features_)
print("Support is %s" % selector.support_)
print("Ranking %s" % selector.ranking_)


# Compare test accuracy on the full vs the RFE-reduced feature set.
from sklearn.feature_selection import RFE
from sklearn.svm import LinearSVC
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris

### Load the data
iris = load_iris()
X, y = iris.data, iris.target
### Feature selection: keep 2 features via recursive elimination
estimator = LinearSVC()
selector = RFE(estimator=estimator, n_features_to_select=2)
X_t = selector.fit_transform(X, y)
#### Split both datasets with the same seed and stratification
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=0, stratify=y)
X_train_t, X_test_t, y_train_t, y_test_t = train_test_split(
    X_t, y, test_size=0.25, random_state=0, stratify=y)
### Train on each and compare test scores
clf = LinearSVC()
clf_t = LinearSVC()
clf.fit(X_train, y_train)
clf_t.fit(X_train_t, y_train_t)
print("Original DataSet:test score=%s" % (clf.score(X_test, y_test)))
print("Selected DataSet:test score=%s" % (clf_t.score(X_test_t, y_test_t)))


# RFECV: recursive feature elimination with the number of kept features
# chosen by 3-fold cross-validation.
import numpy as np
from sklearn.feature_selection import RFECV
from sklearn.svm import LinearSVC
from sklearn.datasets import load_iris

iris = load_iris()
X = iris.data
y = iris.target
estimator = LinearSVC()
selector = RFECV(estimator=estimator, cv=3)
selector.fit(X, y)
# FIX: corrected the "N_teatures" typo in the output label.
print("N_features %s" % selector.n_features_)
print("Support is %s" % selector.support_)
print("Ranking %s" % selector.ranking_)
# FIX: grid_scores_ was removed in scikit-learn 1.2; the per-step CV
# scores are exposed through cv_results_ instead.
print("CV Results %s" % selector.cv_results_)


## Embedded feature selection: an L1-penalized linear SVM drives some
# coefficients to zero; SelectFromModel keeps features whose importance
# is above the mean.
import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import LinearSVC
from sklearn.datasets import load_digits

digits = load_digits()
X, y = digits.data, digits.target
sparse_svc = LinearSVC(penalty='l1', dual=False)
selector = SelectFromModel(estimator=sparse_svc, threshold='mean')
selector.fit(X, y)
selector.transform(X)
print("Threshold %s" % selector.threshold_)
print("Support is %s" % selector.get_support(indices=True))

import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.datasets import load_digits,load_diabetes
from sklearn.linear_model import Lasso
def test_Lasso(*data):
    """Plot how many Lasso coefficients are numerically zero as alpha grows.

    Args:
        data: (X, y) feature matrix and regression targets.
    """
    X, y = data
    alphas = np.logspace(-2, 2)
    zeros = []
    for alpha in alphas:
        regr = Lasso(alpha=alpha)
        regr.fit(X, y)
        # Count coefficients that are numerically zero (|w| < 1e-5).
        zeros.append(sum(1 for coef in regr.coef_ if abs(coef) < 1e-5))
    ##### Plot #####
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(alphas, zeros)
    ax.set_xlabel(r"$\alpha$")
    ax.set_xscale("log")
    ax.set_ylim(0, X.shape[1] + 1)
    ax.set_ylabel("zeros in coef")
    ax.set_title("Sparsity In Lasso")
    plt.show()
def test_LinearSVC(*data):
    """Plot how many L1-penalized LinearSVC coefficients are zero as C grows.

    Args:
        data: (X, y) feature matrix and class labels.
    """
    X, y = data
    Cs = np.logspace(-2, 2)
    zeros = []
    for C in Cs:
        clf = LinearSVC(C=C, penalty='l1', dual=False)
        clf.fit(X, y)
        # coef_ is 2-D (one row per class): count zeros over every entry.
        zeros.append(sum(1 for row in clf.coef_
                         for coef in row if abs(coef) < 1e-5))
    ##### Plot #####
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(Cs, zeros)
    ax.set_xlabel("C")
    ax.set_xscale("log")
    ax.set_ylabel("zeros in coef")
    ax.set_title("Sparsity In SVM")
    plt.show()
if __name__=='__main__':
    # Lasso sparsity on the diabetes regression dataset.
    data=load_diabetes()
    test_Lasso(data.data,data.target)
    # LinearSVC sparsity on the digits classification dataset.
    data=load_digits()
    test_LinearSVC(data.data,data.target)

4、参数优化

# -*- coding: utf-8 -*-
## Exhaustive parameter search with GridSearchCV
from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split

### Load the data
digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(
    digits.data, digits.target, test_size=0.25, random_state=0,
    stratify=digits.target)
#### Parameter search ####
# Two sub-grids: liblinear supports l1/l2 penalties but only one-vs-rest;
# lbfgs supports l2 only but can also do multinomial.
tuned_parameters = [{'penalty': ['l1', 'l2'],
                     'C': [0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100],
                     'solver': ['liblinear'],
                     'multi_class': ['ovr']},

                    {'penalty': ['l2'],
                     'C': [0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100],
                     'solver': ['lbfgs'],
                     'multi_class': ['ovr', 'multinomial']}
                    ]
clf = GridSearchCV(LogisticRegression(tol=1e-6), tuned_parameters, cv=10)
clf.fit(X_train, y_train)
# FIX: corrected the "paramenters" typo in the output label.
print("Best parameters set found:", clf.best_params_)
print("Grid scores:")
# FIX: grid_scores_ was removed in scikit-learn 0.20; the same
# information lives in cv_results_ (mean/std over the CV folds).
cv_results = clf.cv_results_
for mean_score, std_score, params in zip(cv_results['mean_test_score'],
                                         cv_results['std_test_score'],
                                         cv_results['params']):
    print("\t%0.3f(+/-%0.03f) for %s" % (mean_score, std_score * 2, params))
print("Optimized Score:", clf.score(X_test, y_test))
print("Detailed classification report:")
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))



## Randomized parameter search with RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
# FIX: `import scipy` alone does not reliably expose scipy.stats;
# import the subpackage explicitly.
import scipy.stats

### Load the data
digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(
    digits.data, digits.target, test_size=0.25, random_state=0,
    stratify=digits.target)
#### Parameter search ####
# C is sampled from an exponential distribution instead of a fixed grid.
tuned_parameters = {'C': scipy.stats.expon(scale=100),
                    'multi_class': ['ovr', 'multinomial']}

clf = RandomizedSearchCV(LogisticRegression(penalty='l2', solver='lbfgs', tol=1e-6),
                         tuned_parameters, cv=10, scoring="accuracy", n_iter=100)
clf.fit(X_train, y_train)
# FIX: corrected the "paramenters" typo in the output label.
print("Best parameters set found:", clf.best_params_)
print("Randomized Grid scores:")
# FIX: grid_scores_ was removed in scikit-learn 0.20; iterate cv_results_.
cv_results = clf.cv_results_
for mean_score, std_score, params in zip(cv_results['mean_test_score'],
                                         cv_results['std_test_score'],
                                         cv_results['params']):
    print("\t%0.3f(+/-%0.03f) for %s" % (mean_score, std_score * 2, params))
print("Optimized Score:", clf.score(X_test, y_test))
print("Detailed classification report:")
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))

5、学习器流水线和字典学习

# -*- coding: utf-8 -*-
## Estimator pipeline (Pipeline)
from sklearn.datasets import load_digits
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA

'''
Workflow: first reduce dimensionality with PCA, then classify with
logistic regression.
'''
def test_Pipeline(data):
    """Fit the PCA -> LogisticRegression pipeline and print its test score.

    Args:
        data: (X_train, X_test, y_train, y_test) as returned by
            train_test_split.
    """
    X_train, X_test, y_train, y_test = data
    steps = [('PCA', PCA()),
             ('LogisticRegression', LogisticRegression(C=1))]
    pipeline = Pipeline(steps)
    pipeline.fit(X_train, y_train)
    print('name steps:', pipeline.named_steps)
    print('Pipeline Score:', pipeline.score(X_test, y_test))

if __name__=='__main__':
    data = load_digits()
    X = data.data
    y = data.target
    test_Pipeline(train_test_split(X, y, test_size=0.25,
                                   random_state=0, stratify=y))


## Dictionary learning: represent each sample as a sparse combination
# of 3 learned dictionary atoms.
from sklearn.decomposition import DictionaryLearning

X = [[1, 2, 3, 4, 5],
     [6, 7, 8, 9, 10],
     [10, 9, 8, 7, 6],
     [5, 4, 3, 2, 1]]
print("before transform:", X)
dct = DictionaryLearning(n_components=3).fit(X)
print("components is:", dct.components_)
print("after transform:", dct.transform(X))

6、性能度量

# -*- coding: utf-8 -*-
## Performance metrics for classification and regression.
# FIX: removed a stray, unterminated triple-quote that turned the whole
# metrics section below into one giant (never-closed) string literal,
# so none of its code could ever run.
##分类问题的性能度量
# accuracy_score: fraction (or raw count) of correctly classified samples.
from sklearn.metrics import accuracy_score

y_true = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
y_pred = [0, 0, 1, 1, 0, 0, 1, 1, 0, 0]
# normalize=True -> ratio in [0, 1]; normalize=False -> number of hits.
print('Accuracy Score(normalize=True):', accuracy_score(y_true, y_pred, normalize=True))
print('Accuracy Score(normalize=False):', accuracy_score(y_true, y_pred, normalize=False))

# precision_score: of the samples predicted positive, how many truly are.
from sklearn.metrics import accuracy_score, precision_score

y_true = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
y_pred = [0, 0, 1, 1, 0, 0, 0, 0, 0, 0]
print('Accuracy Score:', accuracy_score(y_true, y_pred, normalize=True))
print('Precision Score:', precision_score(y_true, y_pred))

# recall_score: of the truly positive samples, how many were found.
from sklearn.metrics import accuracy_score, precision_score, recall_score

y_true = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
y_pred = [0, 0, 1, 1, 0, 0, 0, 0, 0, 0]
print('Accuracy Score:', accuracy_score(y_true, y_pred, normalize=True))
print('Precision Score:', precision_score(y_true, y_pred))
# FIX: corrected the misspelled output label ("Recalll").
print('Recall Score:', recall_score(y_true, y_pred))

# f1_score: harmonic mean of precision and recall.
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

y_true = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
y_pred = [0, 0, 1, 1, 0, 0, 0, 0, 0, 0]
print('Accuracy Score:', accuracy_score(y_true, y_pred, normalize=True))
print('Precision Score:', precision_score(y_true, y_pred))
# FIX: corrected the misspelled output label ("Recalll").
print('Recall Score:', recall_score(y_true, y_pred))
print('F1 Score:', f1_score(y_true, y_pred))

# fbeta_score: weighted harmonic mean of precision and recall;
# beta < 1 favors precision, beta > 1 favors recall.
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, fbeta_score

y_true = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
y_pred = [0, 0, 1, 1, 0, 0, 0, 0, 0, 0]
print('Accuracy Score:', accuracy_score(y_true, y_pred, normalize=True))
print('Precision Score:', precision_score(y_true, y_pred))
# FIX: corrected the misspelled output label ("Recalll").
print('Recall Score:', recall_score(y_true, y_pred))
print('F1 Score:', f1_score(y_true, y_pred))
# beta -> 0 approaches pure precision; beta -> inf approaches pure recall.
print('Fbeta Score(beta=0.001):', fbeta_score(y_true, y_pred, beta=0.001))
print('Fbeta Score(beta=1):', fbeta_score(y_true, y_pred, beta=1))
print('Fbeta Score(beta=10):', fbeta_score(y_true, y_pred, beta=10))
print('Fbeta Score(beta=10000):', fbeta_score(y_true, y_pred, beta=10000))

# classification_report: precision / recall / F1 per class in one table.
from sklearn.metrics import classification_report

y_true = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
y_pred = [0, 0, 1, 1, 0, 0, 0, 0, 0, 0]
# FIX: corrected the misspelled output label ("Classificiation").
print('Classification Report:\n', classification_report(y_true, y_pred, target_names=["class_0", "class_1"]))

# confusion_matrix: rows are true labels, columns are predicted labels.
from sklearn.metrics import confusion_matrix

y_true = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
y_pred = [0, 0, 1, 1, 0, 0, 0, 0, 0, 0]
print('Confusion Matrix:\n', confusion_matrix(y_true, y_pred, labels=[0, 1]))


#precision_recall_curve
from sklearn.metrics import precision_recall_curve
from sklearn.datasets import load_iris
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import  SVC
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.preprocessing import label_binarize
import numpy as np
##加载数据
iris=load_iris()
X=iris.data
y=iris.target
#二元化标记
y = label_binarize(y, classes=[0,1,2])
n_classes = y.shape[1]
####添加噪声
np.random.seed(0)
n_samples, n_features = X.shape
X = np.c_[X, np.random.randn(n_samples, 200 * n_features)]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.5,random_state=0)
###训练模型
clf=OneVsRestClassifier(SVC(kernel='linear',probability=True,random_state=0))
clf.fit(X_train,y_train)
y_score=clf.fit(X_train,y_train).decision_function(X_test)
###获取P-R
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
precision = dict()
recall=dict()
for i in range(n_classes):
    precision[i],recall[i],_=precision_recall_curve(y_test[:,i],y_score[:,i])
    ax.plot(recall[i],precision[i],label="target=%s"%i)
ax.set_xlabel("Recall Score")
ax.set_ylabel("Precision Score")
ax.set_title("R-P")
ax.legend(loc='best')
ax.set_xlim(0,1.1)
ax.set_ylim(0,1.1)
ax.grid()
plt.show()

# roc_curve: per-class ROC curves (one-vs-rest) plus the chance diagonal.
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.datasets import load_iris
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.preprocessing import label_binarize
import numpy as np

### Load the data
iris = load_iris()
X = iris.data
y = iris.target
# Binarize the labels (one indicator column per class)
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
#### Add noise features
np.random.seed(0)
n_samples, n_features = X.shape
X = np.c_[X, np.random.randn(n_samples, 200 * n_features)]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
### Train the model
clf = OneVsRestClassifier(SVC(kernel='linear', probability=True, random_state=0))
# FIX: the original fitted the model twice (fit() and then
# fit().decision_function()); fit once and reuse the trained classifier.
clf.fit(X_train, y_train)
y_score = clf.decision_function(X_test)
### Compute and plot the ROC curve of each class
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], thresholds = roc_curve(y_test[:, i], y_score[:, i])
    # FIX: the commented-out original passed (fpr, tpr) to roc_auc_score,
    # which expects (y_true, y_score); compute the per-class AUC correctly
    # and show it in the legend.
    roc_auc[i] = roc_auc_score(y_test[:, i], y_score[:, i])
    ax.plot(fpr[i], tpr[i], label="target=%s,auc=%s" % (i, roc_auc[i]))

ax.plot([0, 1], [0, 1], 'k--')
ax.set_xlabel("FPR")
ax.set_ylabel("TPR")
ax.set_title("ROC")
ax.legend(loc="best")
ax.set_xlim(0, 1.1)
ax.set_ylim(0, 1.1)
ax.grid()
plt.show()

## Regression metrics
# mean_absolute_error: average |y_true - y_pred|.
from sklearn.metrics import mean_absolute_error

y_true = [1, 1, 1, 1, 1, 2, 2, 2, 0, 0]
y_pred = [0, 0, 0, 1, 1, 1, 0, 0, 0, 0]
print("Mean Absolute Error:", mean_absolute_error(y_true, y_pred))

# mean_squared_error: average (y_true - y_pred)^2; penalizes big misses.
from sklearn.metrics import mean_squared_error, mean_absolute_error

y_true = [1, 1, 1, 1, 1, 2, 2, 2, 0, 0]
y_pred = [0, 0, 0, 1, 1, 1, 0, 0, 0, 0]
print("Mean Absolute Error", mean_absolute_error(y_true, y_pred))
print("Mean Squared Error", mean_squared_error(y_true, y_pred))

# Validation curve: training vs. cross-validation accuracy as C varies.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import LinearSVC
from sklearn.model_selection import validation_curve

### Load the data
digits = load_digits()
X, y = digits.data, digits.target
#### Compute the validation curve ####
param_name = "C"
param_range = np.logspace(-2, 2)
train_scores, test_scores = validation_curve(
    LinearSVC(), X, y, param_name=param_name,
    param_range=param_range, cv=10, scoring="accuracy")
#### Mean and std of the 10-fold scores for each C ####
train_scores_mean = train_scores.mean(axis=1)
train_scores_std = train_scores.std(axis=1)
test_scores_mean = test_scores.mean(axis=1)
test_scores_std = test_scores.std(axis=1)
#### Plot: mean lines with +/- one-std bands ####
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.semilogx(param_range, train_scores_mean, label="Training Accuracy", color="r")
ax.fill_between(param_range, train_scores_mean - train_scores_std,
                train_scores_mean + train_scores_std, alpha=0.2, color="r")
ax.semilogx(param_range, test_scores_mean, label="Testing Accuracy", color="g")
ax.fill_between(param_range, test_scores_mean - test_scores_std,
                test_scores_mean + test_scores_std, alpha=0.2, color="g")
ax.set_title("Validation Curve with LinearSVC")
ax.set_xlabel("C")
ax.set_ylabel("Score")
ax.set_ylim(0, 1.1)
ax.legend(loc='best')
plt.show()


# Learning curve: accuracy as a function of the training-set size.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import LinearSVC
from sklearn.model_selection import learning_curve

### Load the data
digits = load_digits()
X, y = digits.data, digits.target
#### Compute the learning curve ####
train_sizes = np.linspace(0.1, 1.0, endpoint=True, dtype='float')
# FIX: the third unpacking target was misspelled ("test_scorcs"), so the
# later uses of test_scores raised NameError.
abs_trains_sizes, train_scores, test_scores = learning_curve(
    LinearSVC(), X, y, cv=10, scoring="accuracy", train_sizes=train_sizes)
#### Mean and std of the 10-fold scores for each training size ####
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
#### Plot: mean lines with +/- one-std bands ####
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(abs_trains_sizes, train_scores_mean, label="Training Accuracy", color="r")
ax.fill_between(abs_trains_sizes, train_scores_mean - train_scores_std,
                train_scores_mean + train_scores_std, alpha=0.2, color="r")
ax.plot(abs_trains_sizes, test_scores_mean, label="Testing Accuracy", color="g")
ax.fill_between(abs_trains_sizes, test_scores_mean - test_scores_std,
                test_scores_mean + test_scores_std, alpha=0.2, color="g")
ax.set_title("Learning Curve with LinearSVC")
ax.set_xlabel("Sample Nums")
ax.set_ylabel("Score")
ax.set_ylim(0, 1.1)
ax.legend(loc='best')
plt.show()
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值