scikit-learn: notes on 莫凡 (Mofan)'s tutorials

Several common flavors of machine learning:
1) Supervised learning: data plus labels
2) Unsupervised learning: data only
3) Semi-supervised learning: a combination of the two
4) Reinforcement learning: improving by learning from experience
5) Genetic algorithms: survival of the fittest

#scikit-learn
#the general learning pattern
import numpy as np
#sklearn ships with many built-in datasets; use them freely
from sklearn import datasets
#Mofan's video imports train_test_split from sklearn.cross_validation, since replaced by:
from sklearn.model_selection import train_test_split
#k-nearest neighbors: predict a point's value from the values of its nearest neighbors
from sklearn.neighbors import KNeighborsClassifier

#load the iris flower dataset
iris=datasets.load_iris()
iris_X=iris.data#all attributes of the flowers
iris_y=iris.target#the 3 flower classes
# print(iris_X[:2,:])
# #see how many classes there are
# print(iris_y)

#split the data into training and test sets at the given ratio,
#so the model is never evaluated on the data it was trained on
X_train,X_test,y_train,y_test=train_test_split(iris_X,iris_y,test_size=0.3)
# print(y_train)#the split also shuffles the data
knn=KNeighborsClassifier()
knn.fit(X_train,y_train)#all the training happens here
print(knn.predict(X_test))#predict on the test set
print(y_test)

Output:
[1 1 0 0 2 0 1 2 1 1 1 1 2 1 0 0 0 0 2 2 1 1 1 0 2 1 1 1 0 1 1 2 0 2 1 2 0
0 0 2 1 1 0 1 2]
[1 1 0 0 2 0 1 1 1 1 1 1 2 1 0 0 0 0 2 1 1 1 1 0 2 1 1 1 0 1 2 2 0 2 1 2 0
0 0 2 1 1 0 1 2]
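The two printouts differ in a few positions. To quantify the agreement, a minimal sketch (my addition, reusing the variables above; accuracy_score is scikit-learn's standard helper):

from sklearn.metrics import accuracy_score
y_pred=knn.predict(X_test)
#fraction of test samples classified correctly
print(accuracy_score(y_test,y_pred))
#equivalently, the classifier's built-in scorer:
print(knn.score(X_test,y_test))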

#sklearn's built-in datasets are also handy for practice
from sklearn import datasets
from sklearn.linear_model import LinearRegression

#note: load_boston was removed in scikit-learn 1.2 (see the sketch below)
loaded_data=datasets.load_boston()
model=LinearRegression()
data_X=loaded_data.data
data_y=loaded_data.target
model.fit(data_X,data_y)
print(model.predict(data_X[:4,:]))
print(data_y[:4])

Output:
[30.00384338 25.02556238 30.56759672 28.60703649]
[24. 21.6 34.7 33.4]
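On scikit-learn 1.2 and later, load_boston is gone from the library and the code above raises an error. A minimal sketch (my addition) using fetch_california_housing as a replacement regression dataset, with the rest of the code unchanged:

from sklearn.datasets import fetch_california_housing
from sklearn.linear_model import LinearRegression
housing=fetch_california_housing()#downloaded on first use
data_X,data_y=housing.data,housing.target
model=LinearRegression()
model.fit(data_X,data_y)
print(model.predict(data_X[:4,:]))
print(data_y[:4])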

#generate some data of our own
from sklearn import datasets
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
X,y=datasets.make_regression(n_samples=100,n_features=1,n_targets=1,noise=10)
plt.scatter(X,y)
plt.show()

Output:
[Figure: scatter plot of the generated (X, y) regression data]
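A natural follow-up (my addition, not in the original): fit LinearRegression on the generated data and overlay the fitted line on the scatter:

model=LinearRegression()
model.fit(X,y)
plt.scatter(X,y)
plt.plot(X,model.predict(X),color='r')#the fitted line
plt.show()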

#commonly used model attributes and methods
from sklearn import datasets
from sklearn.linear_model import LinearRegression

loaded_data=datasets.load_boston()
model=LinearRegression()
data_X=loaded_data.data
data_y=loaded_data.target
model.fit(data_X,data_y)
# print(model.predict(data_X[:4,:]))
print(model.coef_)#the coefficients of x, e.g. the 0.1 in y=0.1x+0.3
print(model.intercept_)#the intercept with the y axis, e.g. the 0.3
print(model.get_params())#the parameters the model was created with
print(model.score(data_X,data_y))# R^2, the coefficient of determination
#a measure of how well the model fits the data

Output:
[-1.08011358e-01 4.64204584e-02 2.05586264e-02 2.68673382e+00
-1.77666112e+01 3.80986521e+00 6.92224640e-04 -1.47556685e+00
3.06049479e-01 -1.23345939e-02 -9.52747232e-01 9.31168327e-03
-5.24758378e-01]
36.459488385090125
{'copy_X': True, 'fit_intercept': True, 'n_jobs': None, 'normalize': False}
0.7406426641094095
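To make the score output concrete, a minimal sketch (my addition) that recomputes R^2 from its definition, 1 - SS_res/SS_tot, using the variables above:

import numpy as np
y_pred=model.predict(data_X)
ss_res=np.sum((data_y-y_pred)**2)#residual sum of squares
ss_tot=np.sum((data_y-data_y.mean())**2)#total sum of squares
print(1-ss_res/ss_tot)#matches model.score(data_X,data_y)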

#normalization: standardize the data
from sklearn import preprocessing
import numpy as np
#module for splitting training data and test data
from sklearn.model_selection import train_test_split
#generate some data for classification
#(older versions imported this from sklearn.datasets.samples_generator)
from sklearn.datasets import make_classification
#the model that will consume the data
from sklearn.svm import SVC
import matplotlib.pyplot as plt
# a=np.array([[10,2.7,3.6],
#            [100,5,-2],
#            [120,20,40]],dtype=np.float64)
# print(a)
# print(preprocessing.scale(a))#each column scaled to zero mean, unit variance
X,y=make_classification(n_samples=300,n_features=2,n_redundant=0,n_informative=2,
                        random_state=22,n_clusters_per_class=1,scale=100)
# plt.scatter(X[:,0],X[:,1],c=y)
# plt.show()
#scale each feature into a chosen range instead (default is 0 to 1):
# X=preprocessing.minmax_scale(X,feature_range=(-1,1))
X=preprocessing.scale(X)
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=.3)
clf=SVC()
clf.fit(X_train,y_train)
print(clf.score(X_test,y_test))

Output:
0.9222222222222223
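To see what standardization buys here, a sketch (my addition) that trains the same SVC on the raw, unscaled features for comparison. On older scikit-learn versions (default gamma='auto') the unscaled score drops to near chance; with the modern default gamma='scale' the gap is smaller, since gamma already adapts to the feature variance:

X_raw,y_raw=make_classification(n_samples=300,n_features=2,n_redundant=0,n_informative=2,
                                random_state=22,n_clusters_per_class=1,scale=100)
X_train,X_test,y_train,y_test=train_test_split(X_raw,y_raw,test_size=.3)
clf=SVC()
clf.fit(X_train,y_train)
print(clf.score(X_test,y_test))#compare with the scaled score above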

#cross validation
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

iris=load_iris()
X=iris.data
y=iris.target

X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=4)
knn=KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train,y_train)
print(knn.score(X_test,y_test))

Output:
0.9736842105263158
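This single number depends on how train_test_split happened to split the data. A minimal sketch (my addition) repeating the split with different random_state values shows the spread that cross-validation averages away:

for seed in range(5):
    X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=seed)
    knn=KNeighborsClassifier(n_neighbors=5)
    knn.fit(X_train,y_train)
    print(seed,knn.score(X_test,y_test))#the score varies from split to split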

#cross validation
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

iris=load_iris()
X=iris.data
y=iris.target

# X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=4)
# knn=KNeighborsClassifier(n_neighbors=5)
# knn.fit(X_train,y_train)
# print(knn.score(X_test,y_test))

from sklearn.model_selection import cross_val_score
knn=KNeighborsClassifier(n_neighbors=5)
#split the data into 5 train/test folds instead of a single split
scores=cross_val_score(knn,X,y,cv=5,scoring='accuracy')
#averaging the 5 results gives a more reliable estimate
print(scores.mean())

Output:
0.9733333333333334
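cross_val_score returns one score per fold; printing the array itself (my addition) makes the averaging explicit:

print(scores)#five per-fold accuracies
print(scores.mean())#their mean, the number shown above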

from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
k_range=range(1,31)
k_scores=[]
for k in k_range:
    #try each k to see which parameter works best
    knn=KNeighborsClassifier(n_neighbors=k)
    #10-fold cross-validation
    scores=cross_val_score(knn,X,y,cv=10,scoring='accuracy')#for classification
    k_scores.append(scores.mean())
plt.plot(k_range,k_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated Accuracy')
plt.show()

Output:
[Figure: cross-validated accuracy plotted against k from 1 to 30]
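To read the best k off the curve programmatically, a minimal sketch (my addition):

import numpy as np
best_k=k_range[int(np.argmax(k_scores))]
print(best_k,max(k_scores))#the k with the highest cross-validated accuracy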

#cross validation
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

iris=load_iris()
X=iris.data
y=iris.target

# X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=4)
# knn=KNeighborsClassifier(n_neighbors=5)
# knn.fit(X_train,y_train)
# print(knn.score(X_test,y_test))

# from sklearn.model_selection import cross_val_score
# knn=KNeighborsClassifier(n_neighbors=5)
# #split the data into 5 folds instead of a single split
# scores=cross_val_score(knn,X,y,cv=5,scoring='accuracy')
# #averaging the 5 results gives a more reliable estimate
# print(scores.mean())

from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
k_range=range(1,31)
k_scores=[]
for k in k_range:
    #try each k to see which parameter works best
    knn=KNeighborsClassifier(n_neighbors=k)
    #10-fold cross-validation, scored by (negated) mean squared error
    loss=-cross_val_score(knn,X,y,cv=10,scoring='neg_mean_squared_error')#for regression
    # scores=cross_val_score(knn,X,y,cv=10,scoring='accuracy')#for classification
    # k_scores.append(scores.mean())
    k_scores.append(loss.mean())
plt.plot(k_range,k_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated MSE')#the curve now shows a loss, not accuracy
plt.show()

Output:
[Figure: cross-validated mean squared error plotted against k from 1 to 30]
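Since this is a loss, lower is better, so the best k minimizes the curve (my addition):

import numpy as np
best_k=k_range[int(np.argmin(k_scores))]
print(best_k,min(k_scores))#the k with the lowest cross-validated MSE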

from sklearn.model_selection import learning_curve
from sklearn.datasets import load_digits
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
digits=load_digits()
X=digits.data
y=digits.target
train_sizes,train_loss,test_loss=learning_curve(
    #cv=10: split into 10 folds
    SVC(gamma=0.001),X,y,cv=10,scoring='neg_mean_squared_error',
    #record scores after training on 10%, 25%, 50%, 75% and 100% of the data
    train_sizes=[0.1,0.25,0.5,0.75,1]
    )
train_loss_mean=-np.mean(train_loss,axis=1)
test_loss_mean=-np.mean(test_loss,axis=1)
plt.plot(train_sizes,train_loss_mean,'o-',color='r',
         label='Training')
plt.plot(train_sizes,test_loss_mean,'o-',color='g',
         label='Cross-validation')

plt.xlabel('Training examples')
plt.ylabel('Loss')
plt.legend(loc='best')
plt.show()

Output:
[Figure: training and cross-validation loss versus number of training examples, gamma=0.001]

from sklearn.model_selection import learning_curve
from sklearn.datasets import load_digits
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
digits=load_digits()
X=digits.data
y=digits.target
train_sizes,train_loss,test_loss=learning_curve(
    #cv=10: split into 10 folds
    SVC(gamma=0.01),X,y,cv=10,scoring='neg_mean_squared_error',
    #record scores after training on 10%, 25%, 50%, 75% and 100% of the data
    train_sizes=[0.1,0.25,0.5,0.75,1]
    )
train_loss_mean=-np.mean(train_loss,axis=1)
test_loss_mean=-np.mean(test_loss,axis=1)
plt.plot(train_sizes,train_loss_mean,'o-',color='r',
         label='Training')
plt.plot(train_sizes,test_loss_mean,'o-',color='g',
         label='Cross-validation')

plt.xlabel('Training examples')
plt.ylabel('Loss')
plt.legend(loc='best')
plt.show()

Output:
[Figure: the same curves with gamma=0.01; the training loss stays near zero while the cross-validation loss stays high, the signature of overfitting]


from sklearn.model_selection import validation_curve
from sklearn.datasets import load_digits
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
digits=load_digits()
X=digits.data
y=digits.target
param_range=np.logspace(-6,-2.3,5)
train_loss,test_loss=validation_curve(
    #cv=10: split into 10 folds; sweep gamma over param_range
    SVC(),X,y,param_name='gamma',param_range=param_range,cv=10,scoring='neg_mean_squared_error')

train_loss_mean=-np.mean(train_loss,axis=1)
test_loss_mean=-np.mean(test_loss,axis=1)
plt.plot(param_range,train_loss_mean,'o-',color='r',
         label='Training')
plt.plot(param_range,test_loss_mean,'o-',color='g',
         label='Cross-validation')

plt.xlabel('gamma')
plt.ylabel('Loss')
plt.legend(loc='best')
plt.show()

Output:
[Figure: training and cross-validation loss as a function of gamma]
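To pick the gamma with the lowest cross-validation loss from the sweep, a minimal sketch (my addition, with np as imported in the block above):

best_gamma=param_range[int(np.argmin(test_loss_mean))]
print(best_gamma)#the gamma that minimizes the cross-validation loss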

#saving a model
from sklearn import svm
from sklearn import datasets

# clf=svm.SVC()
iris=datasets.load_iris()
X,y=iris.data,iris.target
# clf.fit(X,y)

#method1:pickle
#save the trained model (commented out: it was already saved in a previous run)
import pickle
# with open('dataset/clf.pickle','wb') as f:
#     pickle.dump(clf,f)

#load the saved model back
with open('dataset/clf.pickle','rb') as f:
    clf2=pickle.load(f)
    print(clf2.predict(X[0:1]))

Output:
[0]
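The save step above is commented out because the model was pickled in an earlier run. A self-contained sketch of the full round trip (my addition, writing to the current directory instead of dataset/):

import pickle
from sklearn import svm,datasets
iris=datasets.load_iris()
X,y=iris.data,iris.target
clf=svm.SVC()
clf.fit(X,y)
with open('clf.pickle','wb') as f:#save
    pickle.dump(clf,f)
with open('clf.pickle','rb') as f:#restore
    clf2=pickle.load(f)
print(clf2.predict(X[0:1]))#[0]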

#saving a model
from sklearn import svm
from sklearn import datasets

clf=svm.SVC()
iris=datasets.load_iris()
X,y=iris.data,iris.target
clf.fit(X,y)

#method1:pickle
#save the trained model
# import pickle
#save
# with open('dataset/clf.pickle','wb') as f:
#     pickle.dump(clf,f)

#load the saved model back
#restore
# with open('dataset/clf.pickle','rb') as f:
#     clf2=pickle.load(f)
#     print(clf2.predict(X[0:1]))

#method2:joblib (faster than pickle for models holding large numpy arrays)
# from sklearn.externals import joblib#pre-0.23 import used in the original run
import joblib#sklearn.externals.joblib was removed in scikit-learn 0.23
#save
joblib.dump(clf,'dataset/clf.pkl')
#restore
clf3=joblib.load('dataset/clf.pkl')
print(clf3.predict(X[0:1]))

Output:
[0]
(The original run, on scikit-learn 0.21, also printed a FutureWarning that SVC's default gamma would change from 'auto' to 'scale' in 0.22, and a DeprecationWarning that sklearn.externals.joblib was deprecated in 0.21 and would be removed in 0.23 in favor of importing joblib directly; the imports above already use the replacement.)
