The general workflow of scikit-learn algorithms

1. KNN

# 1. Prepare the data
from sklearn import datasets
from sklearn.model_selection import train_test_split
iris = datasets.load_iris()
x = iris.data
y = iris.target
# x, y = datasets.load_iris(return_X_y=True)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
# 2. Choose a model
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
# 3. Train
knn.fit(x_train, y_train)
# 4. Evaluate
acc = knn.score(x_test, y_test)
print(acc)  # ~0.93
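
The score() call above folds prediction and evaluation into one step; to look at the predicted labels themselves, a minimal sketch reusing the fitted knn from above:

y_pred = knn.predict(x_test)          # explicit predictions for the test samples
print(y_pred[:10])                    # first ten predicted class labels
print((y_pred == y_test).mean())      # same accuracy that score() reported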

2. Linear regression

from sklearn.datasets import fetch_california_housing
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.model_selection import cross_val_score
x, y = fetch_california_housing(return_X_y=True)
# 2. Ordinary least squares, Lasso, ridge regression, elastic net
lr = LinearRegression()
loss = -cross_val_score(lr, x, y, cv=5, scoring='neg_mean_squared_error').mean()
# For regression, set scoring='neg_mean_squared_error' (negated mean squared error)
print(y.min(), y.max())  # 0.14  5.12
print(loss)
# One way to judge the MSE: compare it with the range between the smallest and largest label
# Try the other linear models
lasso = Lasso(alpha=0.01)
ridge = Ridge(alpha=0.01)
elasticnet = ElasticNet(alpha=0.01)
loss1 = -cross_val_score(lasso, x, y, cv=5, scoring='neg_mean_squared_error').mean()
loss2 = -cross_val_score(ridge, x, y, cv=5, scoring='neg_mean_squared_error').mean()
loss3 = -cross_val_score(elasticnet, x, y, cv=5, scoring='neg_mean_squared_error').mean()
print(loss1, loss2, loss3)
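
To judge the MSE against the label range as the comment above suggests, one informal sketch (a rough heuristic, not a standard metric) compares the RMSE with the spread of y:

import numpy as np
rmse = np.sqrt(loss)                # bring the error back to the units of y
print(rmse / (y.max() - y.min()))   # error as a fraction of the label range; smaller is better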

3. Logistic regression


from sklearn.linear_model import LogisticRegression as LR
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import learning_curve
import matplotlib.pyplot as plt

x, y = load_breast_cancer(return_X_y=True)

lrl1 = LR(penalty="l1", solver="liblinear", C=1, max_iter=1000)  # L1 regularization needs a solver that supports it, e.g. liblinear
lrl2 = LR(penalty="l2", solver="liblinear", C=1, max_iter=1000)  # up to 1000 optimizer iterations (the default is 100)

# train_size, train_acc, test_acc = learning_curve(lrl1, x, y, cv=5)
train_size, train_acc, test_acc = learning_curve(lrl2, x, y, cv=5)
plt.plot(train_size, train_acc.mean(axis=1), label='train_acc')
plt.plot(train_size, test_acc.mean(axis=1), label='test_acc')
plt.legend()
plt.show()
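
A practical difference between the two penalties: L1 drives some coefficients exactly to zero (built-in feature selection), while L2 only shrinks them. A quick check, fitting both models on the full data just for illustration:

import numpy as np
lrl1.fit(x, y)
lrl2.fit(x, y)
print(np.sum(lrl1.coef_ != 0), "non-zero coefficients under L1")  # expect fewer
print(np.sum(lrl2.coef_ != 0), "non-zero coefficients under L2")  # expect all 30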

Note on learning curves: they plot accuracy against the number of training samples, showing how performance changes as the training set grows.

4. Hyperparameter tuning

from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score, validation_curve
import matplotlib.pyplot as plt
x, y = load_iris(return_X_y=True)
# Manual tuning: loop k over 1-10 and see which value gives the highest accuracy
acc = []
for i in range(1, 11):
    knn = KNeighborsClassifier(n_neighbors=i)
    acc.append(cross_val_score(knn, x, y, cv=5).mean())
plt.plot(range(1, 11), acc, 'o-')
plt.show()
# Tuning with a validation curve
knn = KNeighborsClassifier()
train_acc, test_acc = validation_curve(knn, x, y, param_name='n_neighbors',
                                       param_range=range(1, 11), cv=5)
plt.plot(range(1, 11), train_acc.mean(axis=1), 'o-', label='train_acc')
plt.plot(range(1, 11), test_acc.mean(axis=1), 'o-', label='test_acc')
plt.legend()
plt.show()
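
Both approaches above plot curves for a single parameter; scikit-learn's GridSearchCV automates the same search and simply reports the best setting. A minimal sketch on the same data:

from sklearn.model_selection import GridSearchCV
# Exhaustive cross-validated search over n_neighbors
grid = GridSearchCV(KNeighborsClassifier(), {'n_neighbors': range(1, 11)}, cv=5)
grid.fit(x, y)
print(grid.best_params_, grid.best_score_)  # best k and its cross-validated accuracy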

(Figures: the accuracy curves produced by the two tuning approaches above.)

5. Data preprocessing

from sklearn import datasets, preprocessing
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
X, y = datasets.load_iris(return_X_y=True)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.13, random_state=13)
knn = KNeighborsClassifier().fit(x_train, y_train)
print(knn.score(x_test, y_test))
# Preprocessing: each line below is an alternative scaler (chained here only for demonstration)
X = preprocessing.StandardScaler().fit_transform(X)  # standardization (zero mean, unit variance)
X = preprocessing.MinMaxScaler().fit_transform(X)    # normalization: squeezes data into [0, 1]
X = preprocessing.minmax_scale(X)                    # function form of the same transform
X = preprocessing.RobustScaler().fit_transform(X)    # robust to outliers (uses median and IQR)
X = preprocessing.maxabs_scale(X)                    # for sparse data: divide by the max absolute value, scaling to [-1, 1]
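
To check whether scaling actually helps the KNN model at the start of this section, a sketch using a Pipeline, which fits the scaler on the training split only and so avoids leaking test-set statistics:

from sklearn.pipeline import make_pipeline
pipe = make_pipeline(preprocessing.StandardScaler(), KNeighborsClassifier())
pipe.fit(x_train, y_train)         # scaler statistics come from x_train only
print(pipe.score(x_test, y_test))  # compare with the unscaled score printed above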

6. Dimensionality reduction

from sklearn.decomposition import PCA
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
iris = load_iris()
x = iris.data
y = iris.target
pca = PCA(2)  # number of dimensions to keep
x = pca.fit_transform(x)
plt.figure()
plt.scatter(x[y == 0, 0], x[y == 0, 1], c="r", label=iris.target_names[0])
plt.scatter(x[y == 1, 0], x[y == 1, 1], c="b", label=iris.target_names[1])
plt.scatter(x[y == 2, 0], x[y == 2, 1], c="y", label=iris.target_names[2])
plt.legend()
plt.title("PCA of iris dataset")
plt.show()
# Choosing the number of dimensions
pca = PCA(n_components=0.95, svd_solver='full')  # keep enough components to retain 95% of the variance
x = pca.fit_transform(iris.data)  # fit on the original data, not the already-reduced x
from sklearn.decomposition import TruncatedSVD
# Truncated SVD: PCA works on the covariance structure, so when the matrix is very
# large (or sparse) and compute is tight, truncated SVD is a cheaper alternative
svd = TruncatedSVD(2)
x = svd.fit_transform(iris.data)
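
To see how much information the retained components carry, PCA exposes the per-component explained-variance ratio:

print(pca.explained_variance_ratio_)        # variance fraction per retained component
print(pca.explained_variance_ratio_.sum())  # >= 0.95 by construction here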


7. Naive Bayes

Principle: combine the evidence collected from the data with prior class probabilities, so that the model's predictions stay accurate.
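Concretely, for a class c and feature vector x, Bayes' theorem gives the posterior used for prediction; the "naive" part is assuming the features are conditionally independent given the class:

P(c \mid x) = \frac{P(x \mid c)\, P(c)}{P(x)} \propto P(c) \prod_i P(x_i \mid c)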

from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.datasets import load_digits
from sklearn.model_selection import cross_val_score

x, y = load_digits(return_X_y=True)
print(cross_val_score(GaussianNB(), x, y, cv=5, scoring='accuracy').mean())
print(cross_val_score(BernoulliNB(), x, y, cv=5, scoring='accuracy').mean())

Conclusion: Bernoulli naive Bayes performs better here; the digit pixels behave more like on/off features (which BernoulliNB binarizes) than like per-class Gaussians.

8. Clustering

What makes a good clustering: each cluster is dense internally, and clusters are far apart from each other.
Silhouette coefficient: for a sample, let a be its mean distance to points in the same cluster and b its mean distance to points in the nearest other cluster; the coefficient is s = (b - a) / max(a, b). The closer s is to 1, the more similar the sample is to its own cluster; the closer to -1, the more similar it is to the neighboring cluster; s near 0 suggests the two clusters overlap and may really be one.

from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans, DBSCAN
from sklearn.metrics import silhouette_score
# K-Means
x, y = make_blobs(n_samples=500, n_features=2, centers=4, random_state=22)  # data drawn from 4 blobs
fig, ax = plt.subplots(1, 3, figsize=(12, 4))  # split the canvas into 1 row of 3 panels
ax[0].scatter(x[:, 0], x[:, 1], s=8)  # first and second feature of every sample
color = ["r", "green", "b", "orange"]
for i in range(4):  # plot the ground-truth clusters
    ax[1].scatter(x[y == i, 0], x[y == i, 1], s=8, c=color[i])
pred = KMeans(n_clusters=4, random_state=22).fit_predict(x)
ax[2].scatter(x[:, 0], x[:, 1], s=8, c=pred)  # clusters found by K-Means
plt.show()
# Print the silhouette scores
print(silhouette_score(x, y))
print(silhouette_score(x, pred))
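
When the true number of clusters is unknown, the silhouette score can also guide the choice of n_clusters; a short sketch on the same blobs:

for k in range(2, 7):                      # try several cluster counts
    labels = KMeans(n_clusters=k, random_state=22).fit_predict(x)
    print(k, silhouette_score(x, labels))  # the best k should score highest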
from sklearn.datasets import make_circles
import numpy as np
# DBSCAN: handles clusters of arbitrary shape well, including non-convex ones
x1, _ = make_circles(n_samples=5000, factor=.5, noise=0.05)
x2, _ = make_blobs(n_samples=1000, n_features=2, centers=[[1.2, 1.2]], cluster_std=0.1)

fig, ax = plt.subplots(1,3,figsize=(16,4))
x = np.concatenate((x1,x2))
ax[0].scatter(x[:,0],x[:,1],s=8)

from sklearn.cluster import KMeans
pred = KMeans(n_clusters=3).fit_predict(x)
ax[1].scatter(x[:,0],x[:,1],s=8,c=pred)

from sklearn.cluster import DBSCAN
pred = DBSCAN(eps=0.1, min_samples=10).fit_predict(x)
ax[2].scatter(x[:, 0], x[:, 1], s=8, c=pred)
plt.show()
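
Unlike K-Means, DBSCAN can refuse to assign a point to any cluster; such noise points are labeled -1 in the output:

print("clusters found:", len(set(pred)) - (1 if -1 in pred else 0))
print("noise points:", np.sum(pred == -1))  # samples DBSCAN left unassigned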

9. Decision trees

from sklearn.datasets import load_wine
from sklearn import tree
from sklearn.model_selection import train_test_split

wine = load_wine()
x_train, x_test, y_train, y_test = train_test_split(wine.data, wine.target, test_size=0.3, random_state=22)
clf = tree.DecisionTreeClassifier(criterion='entropy', random_state=22, splitter='random'
                                  # , max_depth=10, min_samples_leaf=3, min_samples_split=3
)
# splitter: the default 'best' picks the most informative split at each node;
# 'random' chooses among random candidate splits, which can help prevent overfitting when there are many features
clf.fit(x_train, y_train)
acc = clf.score(x_test, y_test)
print(acc)
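
A fitted tree also reports how much each feature contributed to its splits, which helps interpret the model; a quick sketch using the wine feature names:

for name, imp in zip(wine.feature_names, clf.feature_importances_):
    print(f"{name}: {imp:.3f}")  # impurity-based importances, summing to 1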

10. Ensemble methods

from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier

# Ensemble learning:
# bagging: independent models vote together
# boosting: models are built in sequence, each improving on the last
x, y = load_wine(return_X_y=True)
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.3, random_state=0)
dtc = DecisionTreeClassifier(random_state=22).fit(train_x, train_y)
rfc = RandomForestClassifier(random_state=22).fit(train_x, train_y)
print(dtc.score(test_x, test_y))
print(rfc.score(test_x, test_y))

bgc = BaggingClassifier(random_state=22).fit(train_x, train_y)
print(bgc.score(test_x, test_y))
adc = AdaBoostClassifier(learning_rate=0.1, random_state=22).fit(train_x, train_y)
print(adc.score(test_x, test_y))
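
BaggingClassifier defaults to a decision tree as its base model; making that explicit shows what "independent models voting" means in code (a sketch; the parameter is named estimator from scikit-learn 1.2 on, base_estimator in older versions):

bgc2 = BaggingClassifier(estimator=DecisionTreeClassifier(random_state=22),
                         n_estimators=50, random_state=22).fit(train_x, train_y)
print(bgc2.score(test_x, test_y))  # 50 bootstrap-trained trees voting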